src/hotspot/cpu/x86/sharedRuntime_x86_64.cpp

 473     case T_SHORT:
 474     case T_INT:
 475       if (int_args < Argument::n_int_register_parameters_j) {
 476         regs[i].set1(INT_ArgReg[int_args++]->as_VMReg());
 477       } else {
 478         regs[i].set1(VMRegImpl::stack2reg(stk_args));
 479         stk_args += 2;
 480       }
 481       break;
 482     case T_VOID:
 483       // halves of T_LONG or T_DOUBLE
 484       assert(i != 0 && (sig_bt[i - 1] == T_LONG || sig_bt[i - 1] == T_DOUBLE), "expecting half");
 485       regs[i].set_bad();
 486       break;
 487     case T_LONG:
 488       assert((i + 1) < total_args_passed && sig_bt[i + 1] == T_VOID, "expecting half");
 489       // fall through
 490     case T_OBJECT:
 491     case T_ARRAY:
 492     case T_ADDRESS:
 493     case T_VALUETYPEPTR:
 494       if (int_args < Argument::n_int_register_parameters_j) {
 495         regs[i].set2(INT_ArgReg[int_args++]->as_VMReg());
 496       } else {
 497         regs[i].set2(VMRegImpl::stack2reg(stk_args));
 498         stk_args += 2;
 499       }
 500       break;
 501     case T_FLOAT:
 502       if (fp_args < Argument::n_float_register_parameters_j) {
 503         regs[i].set1(FP_ArgReg[fp_args++]->as_VMReg());
 504       } else {
 505         regs[i].set1(VMRegImpl::stack2reg(stk_args));
 506         stk_args += 2;
 507       }
 508       break;
 509     case T_DOUBLE:
 510       assert((i + 1) < total_args_passed && sig_bt[i + 1] == T_VOID, "expecting half");
 511       if (fp_args < Argument::n_float_register_parameters_j) {
 512         regs[i].set2(FP_ArgReg[fp_args++]->as_VMReg());
 513       } else {


 555     case T_INT:
 556       if (int_args < Argument::n_int_register_parameters_j+1) {
 557         regs[i].set1(INT_ArgReg[int_args]->as_VMReg());
 558         int_args++;
 559       } else {
 560         return -1;
 561       }
 562       break;
 563     case T_VOID:
 564       // halves of T_LONG or T_DOUBLE
 565       assert(i != 0 && (sig_bt[i - 1] == T_LONG || sig_bt[i - 1] == T_DOUBLE), "expecting half");
 566       regs[i].set_bad();
 567       break;
 568     case T_LONG:
 569       assert(sig_bt[i + 1] == T_VOID, "expecting half");
 570       // fall through
 571     case T_OBJECT:
 572     case T_ARRAY:
 573     case T_ADDRESS:
 574     case T_METADATA:
 575     case T_VALUETYPEPTR:
 576       if (int_args < Argument::n_int_register_parameters_j+1) {
 577         regs[i].set2(INT_ArgReg[int_args]->as_VMReg());
 578         int_args++;
 579       } else {
 580         return -1;
 581       }
 582       break;
 583     case T_FLOAT:
 584       if (fp_args < Argument::n_float_register_parameters_j) {
 585         regs[i].set1(FP_ArgReg[fp_args]->as_VMReg());
 586         fp_args++;
 587       } else {
 588         return -1;
 589       }
 590       break;
 591     case T_DOUBLE:
 592       assert(sig_bt[i + 1] == T_VOID, "expecting half");
 593       if (fp_args < Argument::n_float_register_parameters_j) {
 594         regs[i].set2(FP_ArgReg[fp_args]->as_VMReg());
 595         fp_args++;


 635   __ mov(c_rarg0, rbx);
 636   __ mov(c_rarg1, rax);
 637   __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::fixup_callers_callsite)));
 638 
 639   // De-allocate argument register save area
 640   if (frame::arg_reg_save_area_bytes != 0) {
 641     __ addptr(rsp, frame::arg_reg_save_area_bytes);
 642   }
 643 
 644   __ vzeroupper();
 645   __ pop_CPU_state();
 646   // restore sp
 647   __ mov(rsp, r13);
 648   __ bind(L);
 649 }
 650 
 651 // For each value type argument, sig includes the list of fields of
 652 // the value type. This utility function computes the number of
 653 // arguments for the call if value types are passed by reference (the
 654 // calling convention the interpreter expects).
 655 static int compute_total_args_passed_int(const GrowableArray<SigEntry>& sig_extended) {
 656   int total_args_passed = 0;
 657   if (ValueTypePassFieldsAsArgs) {
 658     for (int i = 0; i < sig_extended.length(); i++) {
 659       BasicType bt = sig_extended.at(i)._bt;
 660       if (bt == T_VALUETYPE) {


 661         // In sig_extended, a value type argument starts with:
 662         // T_VALUETYPE, followed by the types of the fields of the
 663         // value type and T_VOID to mark the end of the value
 664         // type. Value types are flattened so, for instance, in the
 665         // case of a value type with an int field and a value type
 666         // field that itself has 2 fields, an int and a long:
 667         // T_VALUETYPE T_INT T_VALUETYPE T_INT T_LONG T_VOID (second
 668         // slot for the T_LONG) T_VOID (inner T_VALUETYPE) T_VOID
 669         // (outer T_VALUETYPE)
 670         total_args_passed++;
 671         int vt = 1;
 672         do {
 673           i++;
 674           BasicType bt = sig_extended.at(i)._bt;
 675           BasicType prev_bt = sig_extended.at(i-1)._bt;
 676           if (bt == T_VALUETYPE) {
 677             vt++;
 678           } else if (bt == T_VOID &&
 679                      prev_bt != T_LONG &&
 680                      prev_bt != T_DOUBLE) {
 681             vt--;
 682           }
 683         } while (vt != 0);
 684       } else {
 685         total_args_passed++;
 686       }
 687     }
 688   } else {
 689     total_args_passed = sig_extended.length();
 690   }
 691   return total_args_passed;
 692 }
 693 
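As an illustration of the flattened encoding described in the comment above, the following standalone sketch (illustration only, not part of the patch; all names are invented) mirrors the counting loop of compute_total_args_passed_int for a value type with an int field and a nested value type holding an int and a long:

// Illustration only: counts interpreter arguments for a hypothetical
// flattened signature, mirroring the loop in compute_total_args_passed_int.
#include <cassert>

enum BT { BT_INT, BT_LONG, BT_DOUBLE, BT_VOID, BT_VALUETYPE };

static int count_interpreter_args(const BT* sig, int len) {
  int total = 0;
  for (int i = 0; i < len; i++) {
    if (sig[i] != BT_VALUETYPE) { total++; continue; }
    total++;        // a whole value type is a single interpreter argument
    int vt = 1;     // current value type nesting depth
    do {
      i++;
      BT bt = sig[i];
      BT prev = sig[i - 1];
      if (bt == BT_VALUETYPE) {
        vt++;
      } else if (bt == BT_VOID && prev != BT_LONG && prev != BT_DOUBLE) {
        vt--;       // a T_VOID that is not a long/double half closes a value type
      }
    } while (vt != 0);
  }
  return total;
}

int main() {
  // T_VALUETYPE T_INT T_VALUETYPE T_INT T_LONG T_VOID (second long slot)
  // T_VOID (inner value type) T_VOID (outer value type)
  const BT sig[] = { BT_VALUETYPE, BT_INT, BT_VALUETYPE, BT_INT, BT_LONG,
                     BT_VOID, BT_VOID, BT_VOID };
  assert(count_interpreter_args(sig, 8) == 1);  // passed as one reference
  return 0;
}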
 694 
 695 static void gen_c2i_adapter_helper(MacroAssembler* masm,
 696                                    BasicType bt,
 697                                    BasicType prev_bt,
 698                                    size_t size_in_bytes,
 699                                    const VMRegPair& reg_pair,
 700                                    const Address& to,
 701                                    int extraspace,
 702                                    bool is_oop) {
 703   assert(bt != T_VALUETYPE || !ValueTypePassFieldsAsArgs, "no value type here");
 704   if (bt == T_VOID) {
 705     assert(prev_bt == T_LONG || prev_bt == T_DOUBLE, "missing half");
 706     return;
 707   }
 708 
 709   // Say 4 args:


 721 
 722   bool wide = (size_in_bytes == wordSize);
 723   VMReg r_1 = reg_pair.first();
 724   VMReg r_2 = reg_pair.second();
 725   assert(r_2->is_valid() == wide, "invalid size");
 726   if (!r_1->is_valid()) {
 727     assert(!r_2->is_valid(), "must be invalid");
 728     return;
 729   }
 730 
 731   if (!r_1->is_XMMRegister()) {
 732     Register val = rax;
 733     assert_different_registers(to.base(), val);
 734     if(r_1->is_stack()) {
 735       int ld_off = r_1->reg2stack() * VMRegImpl::stack_slot_size + extraspace;
 736       __ load_sized_value(val, Address(rsp, ld_off), size_in_bytes, /* is_signed */ false);
 737     } else {
 738       val = r_1->as_Register();
 739     }
 740     if (is_oop) {
 741       __ store_heap_oop(to, val);







 742     } else {
 743       __ store_sized_value(to, val, size_in_bytes);
 744     }
 745   } else {
 746     if (wide) {
 747       __ movdbl(to, r_1->as_XMMRegister());
 748     } else {
 749       __ movflt(to, r_1->as_XMMRegister());
 750     }
 751   }
 752 }
 753 
 754 static void gen_c2i_adapter(MacroAssembler *masm,
 755                             const GrowableArray<SigEntry>& sig_extended,
 756                             const VMRegPair *regs,
 757                             Label& skip_fixup,
 758                             address start,
 759                             OopMapSet*& oop_maps,
 760                             int& frame_complete,
 761                             int& frame_size_in_words) {
 762   // Before we get into the guts of the C2I adapter, see if we should be here
 763   // at all.  We've come from compiled code and are attempting to jump to the
 764   // interpreter, which means the caller made a static call to get here
 765   // (vcalls always get a compiled target if there is one).  Check for a
 766   // compiled target.  If there is one, we need to patch the caller's call.
 767   patch_callers_callsite(masm);
 768 
 769   __ bind(skip_fixup);
 770 
 771   bool has_value_argument = false;
 772   if (ValueTypePassFieldsAsArgs) {
 773     // Is there a value type argument?
 774     for (int i = 0; i < sig_extended.length() && !has_value_argument; i++) {
 775       has_value_argument = (sig_extended.at(i)._bt == T_VALUETYPE);
 776     }
 777     if (has_value_argument) {
 778       // There is at least one value type argument: we're coming from
 779       // compiled code so we have no buffers to back the value
 780       // types. Allocate the buffers here with a runtime call.
 781       oop_maps = new OopMapSet();
 782       OopMap* map = NULL;
 783 
 784       map = RegisterSaver::save_live_registers(masm, 0, &frame_size_in_words);
 785 
 786       frame_complete = __ offset();
 787 
 788       __ set_last_Java_frame(noreg, noreg, NULL);
 789 
 790       __ mov(c_rarg0, r15_thread);
 791       __ mov(c_rarg1, rbx);
 792 
 793       __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::allocate_value_types)));
 794 
 795       oop_maps->add_gc_map((int)(__ pc() - start), map);


 832 
 833   __ subptr(rsp, extraspace);
 834 
 835   // Store the return address in the expected location
 836   __ movptr(Address(rsp, 0), rax);
 837 
 838   // Now write the args into the outgoing interpreter space
 839 
 840   // next_arg_comp is the next argument from the compiler point of
 841   // view (value type fields are passed in registers/on the stack). In
 842   // sig_extended, a value type argument starts with: T_VALUETYPE,
 843   // followed by the types of the fields of the value type and T_VOID
 844   // to mark the end of the value type. ignored counts the number of
 845   // T_VALUETYPE/T_VOID. next_vt_arg is the next value type argument:
 846   // used to get the buffer for that argument from the pool of buffers
 847   // we allocated above and want to pass to the
 848   // interpreter. next_arg_int is the next argument from the
 849   // interpreter point of view (value types are passed by reference).
 850   bool has_oop_field = false;
 851   for (int next_arg_comp = 0, ignored = 0, next_vt_arg = 0, next_arg_int = 0;
 852        next_arg_comp < sig_extended.length(); next_arg_comp++) {
 853     assert(ignored <= next_arg_comp, "shouldn't skip over more slot than there are arguments");
 854     assert(next_arg_int < total_args_passed, "more arguments for the interpreter than expected?");
 855     BasicType bt = sig_extended.at(next_arg_comp)._bt;
 856     int st_off = (total_args_passed - next_arg_int) * Interpreter::stackElementSize;
 857     if (!ValueTypePassFieldsAsArgs || bt != T_VALUETYPE) {



 858       int next_off = st_off - Interpreter::stackElementSize;
 859       const int offset = (bt == T_LONG || bt == T_DOUBLE) ? next_off : st_off;
 860       const VMRegPair reg_pair = regs[next_arg_comp-ignored];
 861       size_t size_in_bytes = reg_pair.second()->is_valid() ? 8 : 4;
 862       gen_c2i_adapter_helper(masm, bt, next_arg_comp > 0 ? sig_extended.at(next_arg_comp-1)._bt : T_ILLEGAL,
 863                              size_in_bytes, reg_pair, Address(rsp, offset), extraspace, false);
 864       next_arg_int++;
 865 #ifdef ASSERT
 866       if (bt == T_LONG || bt == T_DOUBLE) {
 867         // Overwrite the unused slot with known junk
 868         __ mov64(rax, CONST64(0xdeadffffdeadaaaa));
 869         __ movptr(Address(rsp, st_off), rax);
 870       }
 871 #endif /* ASSERT */
 872     } else {
 873       ignored++;
 874       // get the buffer from the just allocated pool of buffers
 875       int index = arrayOopDesc::base_offset_in_bytes(T_OBJECT) + next_vt_arg * type2aelembytes(T_VALUETYPE);
 876       __ load_heap_oop(r11, Address(r10, index));
 877       next_vt_arg++; next_arg_int++;
 878       int vt = 1;
 879       // write fields we get from compiled code in registers/stack
 880       // slots to the buffer: we know we are done with that value type
 881       // argument when we hit the T_VOID that acts as an end of value
 882       // type delimiter for this value type. Value types are flattened
 883       // so we might encounter embedded value types. Each entry in
 884       // sig_extended contains a field offset in the buffer.
 885       do {
 886         next_arg_comp++;
 887         BasicType bt = sig_extended.at(next_arg_comp)._bt;
 888         BasicType prev_bt = sig_extended.at(next_arg_comp-1)._bt;
 889         if (bt == T_VALUETYPE) {
 890           vt++;
 891           ignored++;
 892         } else if (bt == T_VOID &&
 893                    prev_bt != T_LONG &&
 894                    prev_bt != T_DOUBLE) {
 895           vt--;
 896           ignored++;


 897         } else {
 898           int off = sig_extended.at(next_arg_comp)._offset;
 899           assert(off > 0, "offset in object should be positive");
 900           size_t size_in_bytes = is_java_primitive(bt) ? type2aelembytes(bt) : wordSize;
 901           bool is_oop = (bt == T_OBJECT || bt == T_VALUETYPEPTR || bt == T_ARRAY);
 902           has_oop_field = has_oop_field || is_oop;
 903           gen_c2i_adapter_helper(masm, bt, next_arg_comp > 0 ? sig_extended.at(next_arg_comp-1)._bt : T_ILLEGAL,
 904                                  size_in_bytes, regs[next_arg_comp-ignored], Address(r11, off), extraspace, is_oop);
 905         }
 906       } while (vt != 0);
 907       // pass the buffer to the interpreter
 908       __ movptr(Address(rsp, st_off), r11);
 909     }
 910   }
 911 
 912   // If a value type was allocated and initialized, apply post barrier to all oop fields
 913   if (has_value_argument && has_oop_field) {
 914     __ push(r13); // save senderSP
 915     __ push(rbx); // save callee
 916     // Allocate argument register save area
 917     if (frame::arg_reg_save_area_bytes != 0) {
 918       __ subptr(rsp, frame::arg_reg_save_area_bytes);
 919     }
 920     __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::apply_post_barriers), r15_thread, r10);
 921     // De-allocate argument register save area
 922     if (frame::arg_reg_save_area_bytes != 0) {
 923       __ addptr(rsp, frame::arg_reg_save_area_bytes);


 980       __ load_heap_oop(dst, from);
 981     } else {
 982       __ load_sized_value(dst, from, size_in_bytes, is_signed);
 983     }
 984     if (r_1->is_stack()) {
 985       // Convert stack slot to an SP offset (+ wordSize to account for return address)
 986       int st_off = r_1->reg2stack() * VMRegImpl::stack_slot_size + wordSize;
 987       __ movq(Address(rsp, st_off), dst);
 988     }
 989   } else {
 990     if (wide) {
 991       __ movdbl(r_1->as_XMMRegister(), from);
 992     } else {
 993       __ movflt(r_1->as_XMMRegister(), from);
 994     }
 995   }
 996 }
 997 
 998 void SharedRuntime::gen_i2c_adapter(MacroAssembler *masm,
 999                                     int comp_args_on_stack,
1000                                     const GrowableArray<SigEntry>& sig_extended,
1001                                     const VMRegPair *regs) {
1002 
1003   // Note: r13 contains the senderSP on entry. We must preserve it since
 1004   // we may do an i2c -> c2i transition if we lose a race where compiled
1005   // code goes non-entrant while we get args ready.
1006   // In addition we use r13 to locate all the interpreter args as
1007   // we must align the stack to 16 bytes on an i2c entry else we
1008   // lose alignment we expect in all compiled code and register
1009   // save code can segv when fxsave instructions find improperly
1010   // aligned stack pointer.
1011 
1012   // Adapters can be frameless because they do not require the caller
1013   // to perform additional cleanup work, such as correcting the stack pointer.
1014   // An i2c adapter is frameless because the *caller* frame, which is interpreted,
1015   // routinely repairs its own stack pointer (from interpreter_frame_last_sp),
1016   // even if a callee has modified the stack pointer.
1017   // A c2i adapter is frameless because the *callee* frame, which is interpreted,
1018   // routinely repairs its caller's stack pointer (from sender_sp, which is set
1019   // up via the senderSP register).
1020   // In other words, if *either* the caller or callee is interpreted, we can


1058 
1059   // Must preserve original SP for loading incoming arguments because
1060   // we need to align the outgoing SP for compiled code.
1061   __ movptr(r11, rsp);
1062 
1063   // Cut-out for having no stack args.  Since up to 2 int/oop args are passed
1064   // in registers, we will occasionally have no stack args.
1065   int comp_words_on_stack = 0;
1066   if (comp_args_on_stack) {
1067     // Sig words on the stack are greater-than VMRegImpl::stack0.  Those in
1068     // registers are below.  By subtracting stack0, we either get a negative
1069     // number (all values in registers) or the maximum stack slot accessed.
1070 
1071     // Convert 4-byte c2 stack slots to words.
1072     comp_words_on_stack = align_up(comp_args_on_stack*VMRegImpl::stack_slot_size, wordSize)>>LogBytesPerWord;
 1073     // Round up to minimum stack alignment, in wordSize
1074     comp_words_on_stack = align_up(comp_words_on_stack, 2);
1075     __ subptr(rsp, comp_words_on_stack * wordSize);
1076   }
1077 
1078 
1079   // Ensure compiled code always sees stack at proper alignment
1080   __ andptr(rsp, -16);
1081 
 1082   // push the return address and misalign the stack so that the youngest frame always sees
1083   // as far as the placement of the call instruction
1084   __ push(rax);
1085 
1086   // Put saved SP in another register
1087   const Register saved_sp = rax;
1088   __ movptr(saved_sp, r11);
1089 
1090   // Will jump to the compiled code just as if compiled code was doing it.
1091   // Pre-load the register-jump target early, to schedule it better.





1092   __ movptr(r11, Address(rbx, in_bytes(Method::from_compiled_offset())));

1093 
1094 #if INCLUDE_JVMCI
1095   if (EnableJVMCI || UseAOT) {
1096     // check if this call should be routed towards a specific entry point
1097     __ cmpptr(Address(r15_thread, in_bytes(JavaThread::jvmci_alternate_call_target_offset())), 0);
1098     Label no_alternative_target;
1099     __ jcc(Assembler::equal, no_alternative_target);
1100     __ movptr(r11, Address(r15_thread, in_bytes(JavaThread::jvmci_alternate_call_target_offset())));
1101     __ movptr(Address(r15_thread, in_bytes(JavaThread::jvmci_alternate_call_target_offset())), 0);
1102     __ bind(no_alternative_target);
1103   }
1104 #endif // INCLUDE_JVMCI
1105 
1106   int total_args_passed = compute_total_args_passed_int(sig_extended);
1107   // Now generate the shuffle code.  Pick up all register args and move the
1108   // rest through the floating point stack top.
1109 
1110   // next_arg_comp is the next argument from the compiler point of
1111   // view (value type fields are passed in registers/on the stack). In
1112   // sig_extended, a value type argument starts with: T_VALUETYPE,
1113   // followed by the types of the fields of the value type and T_VOID
1114   // to mark the end of the value type. ignored counts the number of
1115   // T_VALUETYPE/T_VOID. next_arg_int is the next argument from the
1116   // interpreter point of view (value types are passed by reference).
1117   for (int next_arg_comp = 0, ignored = 0, next_arg_int = 0; next_arg_comp < sig_extended.length(); next_arg_comp++) {
1118     assert(ignored <= next_arg_comp, "shouldn't skip over more slot than there are arguments");
1119     assert(next_arg_int < total_args_passed, "more arguments from the interpreter than expected?");
1120     BasicType bt = sig_extended.at(next_arg_comp)._bt;
1121     int ld_off = (total_args_passed - next_arg_int)*Interpreter::stackElementSize;
1122     if (!ValueTypePassFieldsAsArgs || bt != T_VALUETYPE) {
1123       // Load in argument order going down.
1124       // Point to interpreter value (vs. tag)



1125       int next_off = ld_off - Interpreter::stackElementSize;
1126       int offset = (bt == T_LONG || bt == T_DOUBLE) ? next_off : ld_off;
1127       const VMRegPair reg_pair = regs[next_arg_comp-ignored];
1128       size_t size_in_bytes = reg_pair.second()->is_valid() ? 8 : 4;
1129       gen_i2c_adapter_helper(masm, bt, next_arg_comp > 0 ? sig_extended.at(next_arg_comp-1)._bt : T_ILLEGAL,
1130                              size_in_bytes, reg_pair, Address(saved_sp, offset), false);
1131       next_arg_int++;
1132     } else {
1133       next_arg_int++;
1134       ignored++;
1135       // get the buffer for that value type
1136       __ movptr(r10, Address(saved_sp, ld_off));
1137       int vt = 1;
1138       // load fields to registers/stack slots from the buffer: we know
1139       // we are done with that value type argument when we hit the
1140       // T_VOID that acts as an end of value type delimiter for this
1141       // value type. Value types are flattened so we might encounter
1142       // embedded value types. Each entry in sig_extended contains a
1143       // field offset in the buffer.
1144       do {
1145         next_arg_comp++;
1146         BasicType bt = sig_extended.at(next_arg_comp)._bt;
1147         BasicType prev_bt = sig_extended.at(next_arg_comp-1)._bt;
1148         if (bt == T_VALUETYPE) {
1149           vt++;
1150           ignored++;
1151         } else if (bt == T_VOID &&
1152                    prev_bt != T_LONG &&
1153                    prev_bt != T_DOUBLE) {
1154           vt--;
1155           ignored++;


1156         } else {
1157           int off = sig_extended.at(next_arg_comp)._offset;
1158           assert(off > 0, "offset in object should be positive");
1159           size_t size_in_bytes = is_java_primitive(bt) ? type2aelembytes(bt) : wordSize;
1160           bool is_oop = (bt == T_OBJECT || bt == T_VALUETYPEPTR || bt == T_ARRAY);
1161           gen_i2c_adapter_helper(masm, bt, prev_bt, size_in_bytes, regs[next_arg_comp - ignored], Address(r10, off), is_oop);
1162         }
1163       } while (vt != 0);
1164     }
1165   }
1166 
1167   // 6243940 We might end up in handle_wrong_method if
1168   // the callee is deoptimized as we race thru here. If that
1169   // happens we don't want to take a safepoint because the
1170   // caller frame will look interpreted and arguments are now
1171   // "compiled" so it is much better to make this transition
1172   // invisible to the stack walking code. Unfortunately if
1173   // we try and find the callee by normal means a safepoint
1174   // is possible. So we stash the desired callee in the thread
 1175   // and the vm will find it there should this case occur.
1176 
1177   __ movptr(Address(r15_thread, JavaThread::callee_target_offset()), rbx);
1178 
1179   // put Method* where a c2i would expect should we end up there
 1180   // only needed because c2 resolve stubs return Method* as a result in
1181   // rax
1182   __ mov(rax, rbx);
1183   __ jmp(r11);
1184 }
1185 
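The interpreter-side slot arithmetic used by both adapters above (st_off in gen_c2i_adapter, ld_off in gen_i2c_adapter) can be checked with a small standalone sketch; the 8-byte Interpreter::stackElementSize is an assumption for x86_64, and the program is illustration only, not part of the patch:

// Illustration only: the interpreter stack slot arithmetic used by the
// adapters; Interpreter::stackElementSize == 8 is assumed here.
#include <cstdio>

int main() {
  const int stackElementSize = 8;
  const int total_args_passed = 3;   // three one-slot arguments; a T_LONG/T_DOUBLE
                                     // would contribute an extra T_VOID slot
  for (int next_arg_int = 0; next_arg_int < total_args_passed; next_arg_int++) {
    int st_off   = (total_args_passed - next_arg_int) * stackElementSize;
    int next_off = st_off - stackElementSize;   // lower slot, used for T_LONG/T_DOUBLE values
    std::printf("arg %d: st_off=%d next_off=%d\n", next_arg_int, st_off, next_off);
  }
  // Prints 24/16, 16/8, 8/0: slots grow downwards, and a long or double is
  // written at the lower slot while its upper slot is filled with junk (ASSERT).
  return 0;
}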
1186 // ---------------------------------------------------------------
1187 AdapterHandlerEntry* SharedRuntime::generate_i2c2i_adapters(MacroAssembler *masm,
1188                                                             int comp_args_on_stack,
1189                                                             const GrowableArray<SigEntry>& sig_extended,
1190                                                             const VMRegPair *regs,



1191                                                             AdapterFingerPrint* fingerprint,
1192                                                             AdapterBlob*& new_adapter) {
1193   address i2c_entry = __ pc();
1194 
1195   gen_i2c_adapter(masm, comp_args_on_stack, sig_extended, regs);






1196 
1197   // -------------------------------------------------------------------------
1198   // Generate a C2I adapter.  On entry we know rbx holds the Method* during calls
1199   // to the interpreter.  The args start out packed in the compiled layout.  They
1200   // need to be unpacked into the interpreter layout.  This will almost always
1201   // require some stack space.  We grow the current (compiled) stack, then repack
1202   // the args.  We  finally end in a jump to the generic interpreter entry point.
1203   // On exit from the interpreter, the interpreter will restore our SP (lest the
 1204   // compiled code, which relies solely on SP and not RBP, get sick).
1205 
1206   address c2i_unverified_entry = __ pc();
1207   Label skip_fixup;
1208   Label ok;
1209 
1210   Register holder = rax;
1211   Register receiver = j_rarg0;
1212   Register temp = rbx;
1213 
1214   {
1215     __ load_klass(temp, receiver);
1216     __ cmpptr(temp, Address(holder, CompiledICHolder::holder_klass_offset()));
1217     __ movptr(rbx, Address(holder, CompiledICHolder::holder_metadata_offset()));
1218     __ jcc(Assembler::equal, ok);
1219     __ jump(RuntimeAddress(SharedRuntime::get_ic_miss_stub()));
1220 
1221     __ bind(ok);
1222     // Method might have been compiled since the call site was patched to
 1223     // interpreted; if that is the case, treat it as a miss so we can get
1224     // the call site corrected.
1225     __ cmpptr(Address(rbx, in_bytes(Method::code_offset())), (int32_t)NULL_WORD);
1226     __ jcc(Assembler::equal, skip_fixup);
1227     __ jump(RuntimeAddress(SharedRuntime::get_ic_miss_stub()));
1228   }
1229 
1230   address c2i_entry = __ pc();

1231 
1232   OopMapSet* oop_maps = NULL;
1233   int frame_complete = CodeOffsets::frame_never_safe;
1234   int frame_size_in_words = 0;
1235   gen_c2i_adapter(masm, sig_extended, regs, skip_fixup, i2c_entry, oop_maps, frame_complete, frame_size_in_words);







1236 
1237   __ flush();
1238   new_adapter = AdapterBlob::create(masm->code(), frame_complete, frame_size_in_words, oop_maps);
1239 
 1240   // If the method has value type arguments, save the extended signature as a symbol in
1241   // the AdapterHandlerEntry to be used for scalarization of value type arguments.
1242   Symbol* extended_signature = NULL;
1243   bool has_value_argument = false;
1244   Thread* THREAD = Thread::current();
1245   ResourceMark rm(THREAD);
1246   int length = sig_extended.length();
1247   char* sig_str = NEW_RESOURCE_ARRAY(char, 2*length + 3);
1248   int idx = 0;
1249   sig_str[idx++] = '(';
1250   for (int index = 0; index < length; index++) {
1251     BasicType bt = sig_extended.at(index)._bt;
1252     if (bt == T_VALUETYPE) {
1253       has_value_argument = true;
1254     } else if (bt == T_VALUETYPEPTR) {
1255       has_value_argument = true;
1256       // non-flattened value type field
1257       sig_str[idx++] = type2char(T_VALUETYPE);
1258       sig_str[idx++] = ';';
1259     } else if (bt == T_VOID) {
1260       // Ignore
1261     } else {
1262       if (bt == T_ARRAY) {
1263         bt = T_OBJECT; // We don't know the element type, treat as Object
1264       }
1265       sig_str[idx++] = type2char(bt);
1266       if (bt == T_OBJECT) {
1267         sig_str[idx++] = ';';
1268       }
1269     }
1270   }
1271   sig_str[idx++] = ')';
1272   sig_str[idx++] = '\0';
1273   if (has_value_argument) {
1274     // Extended signature is only required if a value type argument is passed
1275     extended_signature = SymbolTable::new_permanent_symbol(sig_str, THREAD);
1276   }
1277 
1278   return AdapterHandlerLibrary::new_entry(fingerprint, i2c_entry, c2i_entry, c2i_unverified_entry, extended_signature);
1279 }
1280 
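The extended-signature string assembled above uses the descriptor characters returned by type2char ('I' for int, 'J' for long, 'L' plus a trailing ';' for objects). A minimal sketch of the same loop over a hand-written type list (illustration only, not HotSpot code) shows the shape of the resulting symbol text:

// Illustration only: the shape of the extended-signature string built above.
#include <cstdio>

enum BT { BT_INT, BT_LONG, BT_OBJECT, BT_VOID };

static char bt2char(BT bt) {
  switch (bt) {
    case BT_INT:    return 'I';
    case BT_LONG:   return 'J';
    case BT_OBJECT: return 'L';
    default:        return '?';
  }
}

int main() {
  // Entries for (int, long, Object); the T_VOID marks the long's second slot.
  const BT sig[] = { BT_INT, BT_LONG, BT_VOID, BT_OBJECT };
  char buf[16];
  int idx = 0;
  buf[idx++] = '(';
  for (BT bt : sig) {
    if (bt == BT_VOID) continue;            // second slots are ignored
    buf[idx++] = bt2char(bt);
    if (bt == BT_OBJECT) buf[idx++] = ';';  // objects get a trailing ';'
  }
  buf[idx++] = ')';
  buf[idx] = '\0';
  // Prints "(IJL;)": unlike a real method descriptor there is no class name
  // after 'L', since the symbol is only used to drive scalarization.
  std::printf("%s\n", buf);
  return 0;
}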
1281 int SharedRuntime::c_calling_convention(const BasicType *sig_bt,
1282                                          VMRegPair *regs,
1283                                          VMRegPair *regs2,
1284                                          int total_args_passed) {
1285   assert(regs2 == NULL, "not needed on x86");
1286 // We return the amount of VMRegImpl stack slots we need to reserve for all
1287 // the arguments NOT counting out_preserve_stack_slots.
1288 
1289 // NOTE: These arrays will have to change when c1 is ported
1290 #ifdef _WIN64
1291     static const Register INT_ArgReg[Argument::n_int_register_parameters_c] = {
1292       c_rarg0, c_rarg1, c_rarg2, c_rarg3
1293     };
1294     static const XMMRegister FP_ArgReg[Argument::n_float_register_parameters_c] = {
1295       c_farg0, c_farg1, c_farg2, c_farg3
1296     };
1297 #else
1298     static const Register INT_ArgReg[Argument::n_int_register_parameters_c] = {


4329   // rdx: exception pc
4330   // Jump to handler
4331 
4332   __ jmp(r8);
4333 
4334   // Make sure all code is generated
4335   masm->flush();
4336 
4337   // Set exception blob
4338   _exception_blob =  ExceptionBlob::create(&buffer, oop_maps, SimpleRuntimeFrame::framesize >> 1);
4339 }
4340 #endif // COMPILER2
4341 
4342 BufferedValueTypeBlob* SharedRuntime::generate_buffered_value_type_adapter(const ValueKlass* vk) {
4343   BufferBlob* buf = BufferBlob::create("value types pack/unpack", 16 * K);
4344   CodeBuffer buffer(buf);
4345   short buffer_locs[20];
4346   buffer.insts()->initialize_shared_locs((relocInfo*)buffer_locs,
4347                                          sizeof(buffer_locs)/sizeof(relocInfo));
4348 
4349   MacroAssembler _masm(&buffer);
4350   MacroAssembler* masm = &_masm;
4351 
4352   const Array<SigEntry>* sig_vk = vk->extended_sig();
4353   const Array<VMRegPair>* regs = vk->return_regs();
4354 
4355   int pack_fields_off = __ offset();
4356 
4357   int j = 1;
4358   for (int i = 0; i < sig_vk->length(); i++) {
4359     BasicType bt = sig_vk->at(i)._bt;
4360     if (bt == T_VALUETYPE) {
4361       continue;
4362     }
4363     if (bt == T_VOID) {
4364       if (sig_vk->at(i-1)._bt == T_LONG ||
4365           sig_vk->at(i-1)._bt == T_DOUBLE) {
4366         j++;
4367       }
4368       continue;
4369     }
4370     int off = sig_vk->at(i)._offset;

4371     VMRegPair pair = regs->at(j);
4372     VMReg r_1 = pair.first();
4373     VMReg r_2 = pair.second();
4374     Address to(rax, off);
4375     if (bt == T_FLOAT) {
4376       __ movflt(to, r_1->as_XMMRegister());
4377     } else if (bt == T_DOUBLE) {
4378       __ movdbl(to, r_1->as_XMMRegister());
4379     } else if (bt == T_OBJECT || bt == T_VALUETYPEPTR || bt == T_ARRAY) {
4380       __ store_heap_oop(to, r_1->as_Register());










4381     } else {
4382       assert(is_java_primitive(bt), "unexpected basic type");

4383       size_t size_in_bytes = type2aelembytes(bt);
4384       __ store_sized_value(to, r_1->as_Register(), size_in_bytes);
4385     }
4386     j++;
4387   }
4388   assert(j == regs->length(), "missed a field?");
4389 
4390   __ ret(0);
4391 
4392   int unpack_fields_off = __ offset();
4393 
4394   j = 1;
4395   for (int i = 0; i < sig_vk->length(); i++) {
4396     BasicType bt = sig_vk->at(i)._bt;
4397     if (bt == T_VALUETYPE) {
4398       continue;
4399     }
4400     if (bt == T_VOID) {
4401       if (sig_vk->at(i-1)._bt == T_LONG ||
4402           sig_vk->at(i-1)._bt == T_DOUBLE) {
4403         j++;
4404       }
4405       continue;
4406     }
4407     int off = sig_vk->at(i)._offset;

4408     VMRegPair pair = regs->at(j);
4409     VMReg r_1 = pair.first();
4410     VMReg r_2 = pair.second();
4411     Address from(rax, off);
4412     if (bt == T_FLOAT) {
4413       __ movflt(r_1->as_XMMRegister(), from);
4414     } else if (bt == T_DOUBLE) {
4415       __ movdbl(r_1->as_XMMRegister(), from);
4416     } else if (bt == T_OBJECT || bt == T_VALUETYPEPTR || bt == T_ARRAY) {

4417       __ load_heap_oop(r_1->as_Register(), from);
4418     } else {
4419       assert(is_java_primitive(bt), "unexpected basic type");

4420       size_t size_in_bytes = type2aelembytes(bt);
4421       __ load_sized_value(r_1->as_Register(), from, size_in_bytes, bt != T_CHAR && bt != T_BOOLEAN);
4422     }
4423     j++;
4424   }
4425   assert(j == regs->length(), "missed a field?");
4426 
4427   if (StressValueTypeReturnedAsFields) {
4428     __ load_klass(rax, rax);
4429     __ orptr(rax, 1);
4430   }
4431 
4432   __ ret(0);
4433 
4434   __ flush();
4435 
4436   return BufferedValueTypeBlob::create(&buffer, pack_fields_off, unpack_fields_off);
4437 }
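Conceptually, the pack_fields handler above copies a value type's field values from the return registers into a freshly allocated buffer at the offsets recorded in the extended signature, and unpack_fields performs the reverse. A plain C++ analogy under that reading (illustration only; register and offset names are invented, the real code operates on rax/xmm registers and a heap oop):

// Illustration only: a plain-C++ analogy of the pack/unpack handlers, for a
// hypothetical value type with an int field and a double field.
#include <cassert>
#include <cstring>

struct ReturnRegs { int gpr; double xmm; };      // stand-ins for rax / xmm0
struct Layout     { int int_off; int dbl_off; }; // field offsets, as in extended_sig()

static void pack_fields(char* buffer, const Layout& l, const ReturnRegs& r) {
  std::memcpy(buffer + l.int_off, &r.gpr, sizeof(int));
  std::memcpy(buffer + l.dbl_off, &r.xmm, sizeof(double));
}

static void unpack_fields(const char* buffer, const Layout& l, ReturnRegs& r) {
  std::memcpy(&r.gpr, buffer + l.int_off, sizeof(int));
  std::memcpy(&r.xmm, buffer + l.dbl_off, sizeof(double));
}

int main() {
  char buffer[32] = {};
  Layout layout = { 16, 24 };                    // hypothetical offsets past the object header
  ReturnRegs in = { 42, 3.5 }, out = { 0, 0.0 };
  pack_fields(buffer, layout, in);
  unpack_fields(buffer, layout, out);
  assert(out.gpr == 42 && out.xmm == 3.5);
  return 0;
}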


 473     case T_SHORT:
 474     case T_INT:
 475       if (int_args < Argument::n_int_register_parameters_j) {
 476         regs[i].set1(INT_ArgReg[int_args++]->as_VMReg());
 477       } else {
 478         regs[i].set1(VMRegImpl::stack2reg(stk_args));
 479         stk_args += 2;
 480       }
 481       break;
 482     case T_VOID:
 483       // halves of T_LONG or T_DOUBLE
 484       assert(i != 0 && (sig_bt[i - 1] == T_LONG || sig_bt[i - 1] == T_DOUBLE), "expecting half");
 485       regs[i].set_bad();
 486       break;
 487     case T_LONG:
 488       assert((i + 1) < total_args_passed && sig_bt[i + 1] == T_VOID, "expecting half");
 489       // fall through
 490     case T_OBJECT:
 491     case T_ARRAY:
 492     case T_ADDRESS:

 493       if (int_args < Argument::n_int_register_parameters_j) {
 494         regs[i].set2(INT_ArgReg[int_args++]->as_VMReg());
 495       } else {
 496         regs[i].set2(VMRegImpl::stack2reg(stk_args));
 497         stk_args += 2;
 498       }
 499       break;
 500     case T_FLOAT:
 501       if (fp_args < Argument::n_float_register_parameters_j) {
 502         regs[i].set1(FP_ArgReg[fp_args++]->as_VMReg());
 503       } else {
 504         regs[i].set1(VMRegImpl::stack2reg(stk_args));
 505         stk_args += 2;
 506       }
 507       break;
 508     case T_DOUBLE:
 509       assert((i + 1) < total_args_passed && sig_bt[i + 1] == T_VOID, "expecting half");
 510       if (fp_args < Argument::n_float_register_parameters_j) {
 511         regs[i].set2(FP_ArgReg[fp_args++]->as_VMReg());
 512       } else {


 554     case T_INT:
 555       if (int_args < Argument::n_int_register_parameters_j+1) {
 556         regs[i].set1(INT_ArgReg[int_args]->as_VMReg());
 557         int_args++;
 558       } else {
 559         return -1;
 560       }
 561       break;
 562     case T_VOID:
 563       // halves of T_LONG or T_DOUBLE
 564       assert(i != 0 && (sig_bt[i - 1] == T_LONG || sig_bt[i - 1] == T_DOUBLE), "expecting half");
 565       regs[i].set_bad();
 566       break;
 567     case T_LONG:
 568       assert(sig_bt[i + 1] == T_VOID, "expecting half");
 569       // fall through
 570     case T_OBJECT:
 571     case T_ARRAY:
 572     case T_ADDRESS:
 573     case T_METADATA:

 574       if (int_args < Argument::n_int_register_parameters_j+1) {
 575         regs[i].set2(INT_ArgReg[int_args]->as_VMReg());
 576         int_args++;
 577       } else {
 578         return -1;
 579       }
 580       break;
 581     case T_FLOAT:
 582       if (fp_args < Argument::n_float_register_parameters_j) {
 583         regs[i].set1(FP_ArgReg[fp_args]->as_VMReg());
 584         fp_args++;
 585       } else {
 586         return -1;
 587       }
 588       break;
 589     case T_DOUBLE:
 590       assert(sig_bt[i + 1] == T_VOID, "expecting half");
 591       if (fp_args < Argument::n_float_register_parameters_j) {
 592         regs[i].set2(FP_ArgReg[fp_args]->as_VMReg());
 593         fp_args++;


 633   __ mov(c_rarg0, rbx);
 634   __ mov(c_rarg1, rax);
 635   __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::fixup_callers_callsite)));
 636 
 637   // De-allocate argument register save area
 638   if (frame::arg_reg_save_area_bytes != 0) {
 639     __ addptr(rsp, frame::arg_reg_save_area_bytes);
 640   }
 641 
 642   __ vzeroupper();
 643   __ pop_CPU_state();
 644   // restore sp
 645   __ mov(rsp, r13);
 646   __ bind(L);
 647 }
 648 
 649 // For each value type argument, sig includes the list of fields of
 650 // the value type. This utility function computes the number of
 651 // arguments for the call if value types are passed by reference (the
 652 // calling convention the interpreter expects).
 653 static int compute_total_args_passed_int(const GrowableArray<SigEntry>* sig_extended) {
 654   int total_args_passed = 0;
 655   if (ValueTypePassFieldsAsArgs) {
 656     for (int i = 0; i < sig_extended->length(); i++) {
 657       BasicType bt = sig_extended->at(i)._bt;
 658       if (SigEntry::is_reserved_entry(sig_extended, i)) {
 659         // Ignore reserved entry
 660       } else if (bt == T_VALUETYPE) {
 661         // In sig_extended, a value type argument starts with:
 662         // T_VALUETYPE, followed by the types of the fields of the
 663         // value type and T_VOID to mark the end of the value
 664         // type. Value types are flattened so, for instance, in the
 665         // case of a value type with an int field and a value type
 666         // field that itself has 2 fields, an int and a long:
 667         // T_VALUETYPE T_INT T_VALUETYPE T_INT T_LONG T_VOID (second
 668         // slot for the T_LONG) T_VOID (inner T_VALUETYPE) T_VOID
 669         // (outer T_VALUETYPE)
 670         total_args_passed++;
 671         int vt = 1;
 672         do {
 673           i++;
 674           BasicType bt = sig_extended->at(i)._bt;
 675           BasicType prev_bt = sig_extended->at(i-1)._bt;
 676           if (bt == T_VALUETYPE) {
 677             vt++;
 678           } else if (bt == T_VOID &&
 679                      prev_bt != T_LONG &&
 680                      prev_bt != T_DOUBLE) {
 681             vt--;
 682           }
 683         } while (vt != 0);
 684       } else {
 685         total_args_passed++;
 686       }
 687     }
 688   } else {
 689     total_args_passed = sig_extended->length();
 690   }
 691   return total_args_passed;
 692 }
 693 
 694 
 695 static void gen_c2i_adapter_helper(MacroAssembler* masm,
 696                                    BasicType bt,
 697                                    BasicType prev_bt,
 698                                    size_t size_in_bytes,
 699                                    const VMRegPair& reg_pair,
 700                                    const Address& to,
 701                                    int extraspace,
 702                                    bool is_oop) {
 703   assert(bt != T_VALUETYPE || !ValueTypePassFieldsAsArgs, "no value type here");
 704   if (bt == T_VOID) {
 705     assert(prev_bt == T_LONG || prev_bt == T_DOUBLE, "missing half");
 706     return;
 707   }
 708 
 709   // Say 4 args:


 721 
 722   bool wide = (size_in_bytes == wordSize);
 723   VMReg r_1 = reg_pair.first();
 724   VMReg r_2 = reg_pair.second();
 725   assert(r_2->is_valid() == wide, "invalid size");
 726   if (!r_1->is_valid()) {
 727     assert(!r_2->is_valid(), "must be invalid");
 728     return;
 729   }
 730 
 731   if (!r_1->is_XMMRegister()) {
 732     Register val = rax;
 733     assert_different_registers(to.base(), val);
 734     if(r_1->is_stack()) {
 735       int ld_off = r_1->reg2stack() * VMRegImpl::stack_slot_size + extraspace;
 736       __ load_sized_value(val, Address(rsp, ld_off), size_in_bytes, /* is_signed */ false);
 737     } else {
 738       val = r_1->as_Register();
 739     }
 740     if (is_oop) {
 741       // We don't need barriers because the destination is a newly allocated object.
 742       // Also, we cannot use store_heap_oop(to, val) because it uses r8 as tmp.
 743       if (UseCompressedOops) {
 744         __ encode_heap_oop(val);
 745         __ movl(to, val);
 746       } else {
 747         __ movptr(to, val);
 748       }
 749     } else {
 750       __ store_sized_value(to, val, size_in_bytes);
 751     }
 752   } else {
 753     if (wide) {
 754       __ movdbl(to, r_1->as_XMMRegister());
 755     } else {
 756       __ movflt(to, r_1->as_XMMRegister());
 757     }
 758   }
 759 }
 760 
 761 static void gen_c2i_adapter(MacroAssembler *masm,
 762                             const GrowableArray<SigEntry>* sig_extended,
 763                             const VMRegPair *regs,
 764                             Label& skip_fixup,
 765                             address start,
 766                             OopMapSet*& oop_maps,
 767                             int& frame_complete,
 768                             int& frame_size_in_words) {
 769   // Before we get into the guts of the C2I adapter, see if we should be here
 770   // at all.  We've come from compiled code and are attempting to jump to the
 771   // interpreter, which means the caller made a static call to get here
 772   // (vcalls always get a compiled target if there is one).  Check for a
 773   // compiled target.  If there is one, we need to patch the caller's call.
 774   patch_callers_callsite(masm);
 775 
 776   __ bind(skip_fixup);
 777 
 778   bool has_value_argument = false;
 779   if (ValueTypePassFieldsAsArgs) {
 780     // Is there a value type argument?
 781     for (int i = 0; i < sig_extended->length() && !has_value_argument; i++) {
 782       has_value_argument = (sig_extended->at(i)._bt == T_VALUETYPE);
 783     }
 784     if (has_value_argument) {
 785       // There is at least one value type argument: we're coming from
 786       // compiled code so we have no buffers to back the value
 787       // types. Allocate the buffers here with a runtime call.
 788       oop_maps = new OopMapSet();
 789       OopMap* map = NULL;
 790 
 791       map = RegisterSaver::save_live_registers(masm, 0, &frame_size_in_words);
 792 
 793       frame_complete = __ offset();
 794 
 795       __ set_last_Java_frame(noreg, noreg, NULL);
 796 
 797       __ mov(c_rarg0, r15_thread);
 798       __ mov(c_rarg1, rbx);
 799 
 800       __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::allocate_value_types)));
 801 
 802       oop_maps->add_gc_map((int)(__ pc() - start), map);


 839 
 840   __ subptr(rsp, extraspace);
 841 
 842   // Store the return address in the expected location
 843   __ movptr(Address(rsp, 0), rax);
 844 
 845   // Now write the args into the outgoing interpreter space
 846 
 847   // next_arg_comp is the next argument from the compiler point of
 848   // view (value type fields are passed in registers/on the stack). In
 849   // sig_extended, a value type argument starts with: T_VALUETYPE,
 850   // followed by the types of the fields of the value type and T_VOID
 851   // to mark the end of the value type. ignored counts the number of
 852   // T_VALUETYPE/T_VOID. next_vt_arg is the next value type argument:
 853   // used to get the buffer for that argument from the pool of buffers
 854   // we allocated above and want to pass to the
 855   // interpreter. next_arg_int is the next argument from the
 856   // interpreter point of view (value types are passed by reference).
 857   bool has_oop_field = false;
 858   for (int next_arg_comp = 0, ignored = 0, next_vt_arg = 0, next_arg_int = 0;
 859        next_arg_comp < sig_extended->length(); next_arg_comp++) {
 860     assert(ignored <= next_arg_comp, "shouldn't skip over more slots than there are arguments");
 861     assert(next_arg_int <= total_args_passed, "more arguments for the interpreter than expected?");
 862     BasicType bt = sig_extended->at(next_arg_comp)._bt;
 863     int st_off = (total_args_passed - next_arg_int) * Interpreter::stackElementSize;
 864     if (!ValueTypePassFieldsAsArgs || bt != T_VALUETYPE) {
 865       if (SigEntry::is_reserved_entry(sig_extended, next_arg_comp)) {
 866         continue; // Ignore reserved entry
 867       }
 868       int next_off = st_off - Interpreter::stackElementSize;
 869       const int offset = (bt == T_LONG || bt == T_DOUBLE) ? next_off : st_off;
 870       const VMRegPair reg_pair = regs[next_arg_comp-ignored];
 871       size_t size_in_bytes = reg_pair.second()->is_valid() ? 8 : 4;
 872       gen_c2i_adapter_helper(masm, bt, next_arg_comp > 0 ? sig_extended->at(next_arg_comp-1)._bt : T_ILLEGAL,
 873                              size_in_bytes, reg_pair, Address(rsp, offset), extraspace, false);
 874       next_arg_int++;
 875 #ifdef ASSERT
 876       if (bt == T_LONG || bt == T_DOUBLE) {
 877         // Overwrite the unused slot with known junk
 878         __ mov64(rax, CONST64(0xdeadffffdeadaaaa));
 879         __ movptr(Address(rsp, st_off), rax);
 880       }
 881 #endif /* ASSERT */
 882     } else {
 883       ignored++;
 884       // get the buffer from the just allocated pool of buffers
 885       int index = arrayOopDesc::base_offset_in_bytes(T_OBJECT) + next_vt_arg * type2aelembytes(T_VALUETYPE);
 886       __ load_heap_oop(r11, Address(r10, index));
 887       next_vt_arg++; next_arg_int++;
 888       int vt = 1;
 889       // write fields we get from compiled code in registers/stack
 890       // slots to the buffer: we know we are done with that value type
 891       // argument when we hit the T_VOID that acts as an end of value
 892       // type delimiter for this value type. Value types are flattened
 893       // so we might encounter embedded value types. Each entry in
 894       // sig_extended contains a field offset in the buffer.
 895       do {
 896         next_arg_comp++;
 897         BasicType bt = sig_extended->at(next_arg_comp)._bt;
 898         BasicType prev_bt = sig_extended->at(next_arg_comp-1)._bt;
 899         if (bt == T_VALUETYPE) {
 900           vt++;
 901           ignored++;
 902         } else if (bt == T_VOID &&
 903                    prev_bt != T_LONG &&
 904                    prev_bt != T_DOUBLE) {
 905           vt--;
 906           ignored++;
 907         } else if (SigEntry::is_reserved_entry(sig_extended, next_arg_comp)) {
 908           // Ignore reserved entry
 909         } else {
 910           int off = sig_extended->at(next_arg_comp)._offset;
 911           assert(off > 0, "offset in object should be positive");
 912           size_t size_in_bytes = is_java_primitive(bt) ? type2aelembytes(bt) : wordSize;
 913           bool is_oop = (bt == T_OBJECT || bt == T_ARRAY);
 914           has_oop_field = has_oop_field || is_oop;
 915           gen_c2i_adapter_helper(masm, bt, next_arg_comp > 0 ? sig_extended->at(next_arg_comp-1)._bt : T_ILLEGAL,
 916                                  size_in_bytes, regs[next_arg_comp-ignored], Address(r11, off), extraspace, is_oop);
 917         }
 918       } while (vt != 0);
 919       // pass the buffer to the interpreter
 920       __ movptr(Address(rsp, st_off), r11);
 921     }
 922   }
 923 
 924   // If a value type was allocated and initialized, apply post barrier to all oop fields
 925   if (has_value_argument && has_oop_field) {
 926     __ push(r13); // save senderSP
 927     __ push(rbx); // save callee
 928     // Allocate argument register save area
 929     if (frame::arg_reg_save_area_bytes != 0) {
 930       __ subptr(rsp, frame::arg_reg_save_area_bytes);
 931     }
 932     __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::apply_post_barriers), r15_thread, r10);
 933     // De-allocate argument register save area
 934     if (frame::arg_reg_save_area_bytes != 0) {
 935       __ addptr(rsp, frame::arg_reg_save_area_bytes);


 992       __ load_heap_oop(dst, from);
 993     } else {
 994       __ load_sized_value(dst, from, size_in_bytes, is_signed);
 995     }
 996     if (r_1->is_stack()) {
 997       // Convert stack slot to an SP offset (+ wordSize to account for return address)
 998       int st_off = r_1->reg2stack() * VMRegImpl::stack_slot_size + wordSize;
 999       __ movq(Address(rsp, st_off), dst);
1000     }
1001   } else {
1002     if (wide) {
1003       __ movdbl(r_1->as_XMMRegister(), from);
1004     } else {
1005       __ movflt(r_1->as_XMMRegister(), from);
1006     }
1007   }
1008 }
1009 
1010 void SharedRuntime::gen_i2c_adapter(MacroAssembler *masm,
1011                                     int comp_args_on_stack,
1012                                     const GrowableArray<SigEntry>* sig,
1013                                     const VMRegPair *regs) {
1014 
1015   // Note: r13 contains the senderSP on entry. We must preserve it since
 1016   // we may do an i2c -> c2i transition if we lose a race where compiled
1017   // code goes non-entrant while we get args ready.
1018   // In addition we use r13 to locate all the interpreter args as
1019   // we must align the stack to 16 bytes on an i2c entry else we
1020   // lose alignment we expect in all compiled code and register
1021   // save code can segv when fxsave instructions find improperly
1022   // aligned stack pointer.
1023 
1024   // Adapters can be frameless because they do not require the caller
1025   // to perform additional cleanup work, such as correcting the stack pointer.
1026   // An i2c adapter is frameless because the *caller* frame, which is interpreted,
1027   // routinely repairs its own stack pointer (from interpreter_frame_last_sp),
1028   // even if a callee has modified the stack pointer.
1029   // A c2i adapter is frameless because the *callee* frame, which is interpreted,
1030   // routinely repairs its caller's stack pointer (from sender_sp, which is set
1031   // up via the senderSP register).
1032   // In other words, if *either* the caller or callee is interpreted, we can


1070 
1071   // Must preserve original SP for loading incoming arguments because
1072   // we need to align the outgoing SP for compiled code.
1073   __ movptr(r11, rsp);
1074 
1075   // Cut-out for having no stack args.  Since up to 2 int/oop args are passed
1076   // in registers, we will occasionally have no stack args.
1077   int comp_words_on_stack = 0;
1078   if (comp_args_on_stack) {
1079     // Sig words on the stack are greater-than VMRegImpl::stack0.  Those in
1080     // registers are below.  By subtracting stack0, we either get a negative
1081     // number (all values in registers) or the maximum stack slot accessed.
1082 
1083     // Convert 4-byte c2 stack slots to words.
1084     comp_words_on_stack = align_up(comp_args_on_stack*VMRegImpl::stack_slot_size, wordSize)>>LogBytesPerWord;
 1085     // Round up to minimum stack alignment, in wordSize
1086     comp_words_on_stack = align_up(comp_words_on_stack, 2);
1087     __ subptr(rsp, comp_words_on_stack * wordSize);
1088   }
1089 

1090   // Ensure compiled code always sees stack at proper alignment
1091   __ andptr(rsp, -16);
1092 
 1093   // push the return address and misalign the stack so that the youngest frame always sees
1094   // as far as the placement of the call instruction
1095   __ push(rax);
1096 
1097   // Put saved SP in another register
1098   const Register saved_sp = rax;
1099   __ movptr(saved_sp, r11);
1100 
1101   // Will jump to the compiled code just as if compiled code was doing it.
1102   // Pre-load the register-jump target early, to schedule it better.
1103   if (StressValueTypePassFieldsAsArgs) {
1104     // For stress testing, don't unpack value types in the i2c adapter but
1105     // call the value type entry point and let it take care of unpacking.
1106     __ movptr(r11, Address(rbx, in_bytes(Method::from_compiled_value_offset())));
1107   } else {
1108     __ movptr(r11, Address(rbx, in_bytes(Method::from_compiled_offset())));
1109   }
1110 
1111 #if INCLUDE_JVMCI
1112   if (EnableJVMCI || UseAOT) {
1113     // check if this call should be routed towards a specific entry point
1114     __ cmpptr(Address(r15_thread, in_bytes(JavaThread::jvmci_alternate_call_target_offset())), 0);
1115     Label no_alternative_target;
1116     __ jcc(Assembler::equal, no_alternative_target);
1117     __ movptr(r11, Address(r15_thread, in_bytes(JavaThread::jvmci_alternate_call_target_offset())));
1118     __ movptr(Address(r15_thread, in_bytes(JavaThread::jvmci_alternate_call_target_offset())), 0);
1119     __ bind(no_alternative_target);
1120   }
1121 #endif // INCLUDE_JVMCI
1122 
1123   int total_args_passed = compute_total_args_passed_int(sig);
1124   // Now generate the shuffle code.  Pick up all register args and move the
1125   // rest through the floating point stack top.
1126 
1127   // next_arg_comp is the next argument from the compiler point of
1128   // view (value type fields are passed in registers/on the stack). In
1129   // sig_extended, a value type argument starts with: T_VALUETYPE,
1130   // followed by the types of the fields of the value type and T_VOID
1131   // to mark the end of the value type. ignored counts the number of
1132   // T_VALUETYPE/T_VOID. next_arg_int is the next argument from the
1133   // interpreter point of view (value types are passed by reference).
1134   for (int next_arg_comp = 0, ignored = 0, next_arg_int = 0; next_arg_comp < sig->length(); next_arg_comp++) {
1135     assert(ignored <= next_arg_comp, "shouldn't skip over more slots than there are arguments");
1136     assert(next_arg_int <= total_args_passed, "more arguments from the interpreter than expected?");
1137     BasicType bt = sig->at(next_arg_comp)._bt;
1138     int ld_off = (total_args_passed - next_arg_int)*Interpreter::stackElementSize;
1139     if (!ValueTypePassFieldsAsArgs || bt != T_VALUETYPE) {
1140       // Load in argument order going down.
1141       // Point to interpreter value (vs. tag)
1142       if (SigEntry::is_reserved_entry(sig, next_arg_comp)) {
1143         continue; // Ignore reserved entry
1144       }
1145       int next_off = ld_off - Interpreter::stackElementSize;
1146       int offset = (bt == T_LONG || bt == T_DOUBLE) ? next_off : ld_off;
1147       const VMRegPair reg_pair = regs[next_arg_comp-ignored];
1148       size_t size_in_bytes = reg_pair.second()->is_valid() ? 8 : 4;
1149       gen_i2c_adapter_helper(masm, bt, next_arg_comp > 0 ? sig->at(next_arg_comp-1)._bt : T_ILLEGAL,
1150                              size_in_bytes, reg_pair, Address(saved_sp, offset), false);
1151       next_arg_int++;
1152     } else {
1153       next_arg_int++;
1154       ignored++;
1155       // get the buffer for that value type
1156       __ movptr(r10, Address(saved_sp, ld_off));
1157       int vt = 1;
1158       // load fields to registers/stack slots from the buffer: we know
1159       // we are done with that value type argument when we hit the
1160       // T_VOID that acts as an end of value type delimiter for this
1161       // value type. Value types are flattened so we might encounter
1162       // embedded value types. Each entry in sig_extended contains a
1163       // field offset in the buffer.
1164       do {
1165         next_arg_comp++;
1166         BasicType bt = sig->at(next_arg_comp)._bt;
1167         BasicType prev_bt = sig->at(next_arg_comp-1)._bt;
1168         if (bt == T_VALUETYPE) {
1169           vt++;
1170           ignored++;
1171         } else if (bt == T_VOID &&
1172                    prev_bt != T_LONG &&
1173                    prev_bt != T_DOUBLE) {
1174           vt--;
1175           ignored++;
1176         } else if (SigEntry::is_reserved_entry(sig, next_arg_comp)) {
1177           // Ignore reserved entry
1178         } else {
1179           int off = sig->at(next_arg_comp)._offset;
1180           assert(off > 0, "offset in object should be positive");
1181           size_t size_in_bytes = is_java_primitive(bt) ? type2aelembytes(bt) : wordSize;
1182           bool is_oop = (bt == T_OBJECT || bt == T_ARRAY);
1183           gen_i2c_adapter_helper(masm, bt, prev_bt, size_in_bytes, regs[next_arg_comp - ignored], Address(r10, off), is_oop);
1184         }
1185       } while (vt != 0);
1186     }
1187   }
1188 
1189   // 6243940 We might end up in handle_wrong_method if
1190   // the callee is deoptimized as we race thru here. If that
1191   // happens we don't want to take a safepoint because the
1192   // caller frame will look interpreted and arguments are now
1193   // "compiled" so it is much better to make this transition
1194   // invisible to the stack walking code. Unfortunately if
1195   // we try and find the callee by normal means a safepoint
1196   // is possible. So we stash the desired callee in the thread
 1197   // and the vm will find it there should this case occur.
1198 
1199   __ movptr(Address(r15_thread, JavaThread::callee_target_offset()), rbx);
1200 
1201   // put Method* where a c2i would expect should we end up there
 1202   // only needed because c2 resolve stubs return Method* as a result in
1203   // rax
1204   __ mov(rax, rbx);
1205   __ jmp(r11);
1206 }
1207 
1208 // ---------------------------------------------------------------
1209 AdapterHandlerEntry* SharedRuntime::generate_i2c2i_adapters(MacroAssembler *masm,
1210                                                             int comp_args_on_stack,
1211                                                             int comp_args_on_stack_cc,
1212                                                             const GrowableArray<SigEntry>* sig,
1213                                                             const VMRegPair* regs,
1214                                                             const GrowableArray<SigEntry>* sig_cc,
1215                                                             const VMRegPair* regs_cc,
1216                                                             AdapterFingerPrint* fingerprint,
1217                                                             AdapterBlob*& new_adapter) {
1218   address i2c_entry = __ pc();
1219 
1220   if (StressValueTypePassFieldsAsArgs) {
1221     // For stress testing, don't unpack value types in the i2c adapter but
1222     // call the value type entry point and let it take care of unpacking.
1223     gen_i2c_adapter(masm, comp_args_on_stack, sig, regs);
1224   } else {
1225     gen_i2c_adapter(masm, comp_args_on_stack_cc, sig_cc, regs_cc);
1226   }
1227 
1228   // -------------------------------------------------------------------------
1229   // Generate a C2I adapter.  On entry we know rbx holds the Method* during calls
1230   // to the interpreter.  The args start out packed in the compiled layout.  They
1231   // need to be unpacked into the interpreter layout.  This will almost always
1232   // require some stack space.  We grow the current (compiled) stack, then repack
1233   // the args.  We finally end in a jump to the generic interpreter entry point.
1234   // On exit from the interpreter, the interpreter will restore our SP (lest the
1235   // compiled code, which relies solely on SP and not RBP, get sick).
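       // Roughly: the adapter extends the stack to make room for the interpreter's
       // argument slots, stores each incoming argument from its compiled register
       // or outgoing stack slot into those slots, and then loads the interpreter
       // entry point from the Method* and jumps to it (a sketch of the idea; the
       // exact slot layout is what gen_c2i_adapter computes).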
1236 
1237   address c2i_unverified_entry = __ pc();
1238   Label skip_fixup;
1239   Label ok;
1240 
1241   Register holder = rax;
1242   Register receiver = j_rarg0;
1243   Register temp = rbx;
1244 
1245   {
1246     __ load_klass(temp, receiver);
1247     __ cmpptr(temp, Address(holder, CompiledICHolder::holder_klass_offset()));
1248     __ movptr(rbx, Address(holder, CompiledICHolder::holder_metadata_offset()));
1249     __ jcc(Assembler::equal, ok);
1250     __ jump(RuntimeAddress(SharedRuntime::get_ic_miss_stub()));
1251 
1252     __ bind(ok);
1253     // Method might have been compiled since the call site was patched to
1254     // interpreted; if that is the case, treat it as a miss so we can get
1255     // the call site corrected.
1256     __ cmpptr(Address(rbx, in_bytes(Method::code_offset())), (int32_t)NULL_WORD);
1257     __ jcc(Assembler::equal, skip_fixup);
1258     __ jump(RuntimeAddress(SharedRuntime::get_ic_miss_stub()));
1259   }
1260 
1261   address c2i_entry = __ pc();
1262   address c2i_value_entry = c2i_entry;
1263 
1264   OopMapSet* oop_maps = NULL;
1265   int frame_complete = CodeOffsets::frame_never_safe;
1266   int frame_size_in_words = 0;
1267   gen_c2i_adapter(masm, sig_cc, regs_cc, skip_fixup, i2c_entry, oop_maps, frame_complete, frame_size_in_words);
1268 
1269   if (regs != regs_cc) {
1270     // Non-scalarized c2i adapter
1271     c2i_value_entry = __ pc();
1272     Label unused;
1273     gen_c2i_adapter(masm, sig, regs, unused, i2c_entry, oop_maps, frame_complete, frame_size_in_words);
1274   }
1275 
1276   __ flush();

1277 
1278   // The c2i adapter might safepoint and trigger a GC. The caller must make sure that
1279   // the GC knows about the locations of the oop arguments passed to the c2i adapter.
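       // (That is, the caller's oop map at the call site is expected to cover the
       // outgoing argument area, so those oops stay visible to the GC while the
       // c2i adapter runs.)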
1280   bool caller_must_gc_arguments = (regs != regs_cc);
1281   new_adapter = AdapterBlob::create(masm->code(), frame_complete, frame_size_in_words, oop_maps, caller_must_gc_arguments);
1282 
1283   return AdapterHandlerLibrary::new_entry(fingerprint, i2c_entry, c2i_entry, c2i_value_entry, c2i_unverified_entry);
1284 }
1285 
1286 int SharedRuntime::c_calling_convention(const BasicType *sig_bt,
1287                                          VMRegPair *regs,
1288                                          VMRegPair *regs2,
1289                                          int total_args_passed) {
1290   assert(regs2 == NULL, "not needed on x86");
1291 // We return the number of VMRegImpl stack slots we need to reserve for all
1292 // the arguments NOT counting out_preserve_stack_slots.
1293 
1294 // NOTE: These arrays will have to change when c1 is ported
1295 #ifdef _WIN64
1296     static const Register INT_ArgReg[Argument::n_int_register_parameters_c] = {
1297       c_rarg0, c_rarg1, c_rarg2, c_rarg3
1298     };
1299     static const XMMRegister FP_ArgReg[Argument::n_float_register_parameters_c] = {
1300       c_farg0, c_farg1, c_farg2, c_farg3
1301     };
1302 #else
1303     static const Register INT_ArgReg[Argument::n_int_register_parameters_c] = {


4334   // rdx: exception pc
4335   // Jump to handler
4336 
4337   __ jmp(r8);
4338 
4339   // Make sure all code is generated
4340   masm->flush();
4341 
4342   // Set exception blob
4343   _exception_blob =  ExceptionBlob::create(&buffer, oop_maps, SimpleRuntimeFrame::framesize >> 1);
4344 }
4345 #endif // COMPILER2
4346 
4347 BufferedValueTypeBlob* SharedRuntime::generate_buffered_value_type_adapter(const ValueKlass* vk) {
4348   BufferBlob* buf = BufferBlob::create("value types pack/unpack", 16 * K);
4349   CodeBuffer buffer(buf);
4350   short buffer_locs[20];
4351   buffer.insts()->initialize_shared_locs((relocInfo*)buffer_locs,
4352                                          sizeof(buffer_locs)/sizeof(relocInfo));
4353 
4354   MacroAssembler* masm = new MacroAssembler(&buffer);

4355 
4356   const Array<SigEntry>* sig_vk = vk->extended_sig();
4357   const Array<VMRegPair>* regs = vk->return_regs();
4358 
4359   int pack_fields_off = __ offset();
4360 
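       // Pack stub: each field value arrives in the return register described by
       // regs->at(j) and is stored into the buffered value pointed to by rax.
       // j starts at 1, skipping regs->at(0), which presumably corresponds to the
       // buffered value itself; the assert below checks that every field consumed
       // exactly one entry.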
4361   int j = 1;
4362   for (int i = 0; i < sig_vk->length(); i++) {
4363     BasicType bt = sig_vk->at(i)._bt;
4364     if (bt == T_VALUETYPE) {
4365       continue;
4366     }
4367     if (bt == T_VOID) {
4368       if (sig_vk->at(i-1)._bt == T_LONG ||
4369           sig_vk->at(i-1)._bt == T_DOUBLE) {
4370         j++;
4371       }
4372       continue;
4373     }
4374     int off = sig_vk->at(i)._offset;
4375     assert(off > 0, "offset in object should be positive");
4376     VMRegPair pair = regs->at(j);
4377     VMReg r_1 = pair.first();
4378     VMReg r_2 = pair.second();
4379     Address to(rax, off);
4380     if (bt == T_FLOAT) {
4381       __ movflt(to, r_1->as_XMMRegister());
4382     } else if (bt == T_DOUBLE) {
4383       __ movdbl(to, r_1->as_XMMRegister());
4384     } else if (bt == T_OBJECT || bt == T_ARRAY) {
4385       Register val = r_1->as_Register();
4386       assert_different_registers(rax, val);
4387       // We don't need barriers because the destination is a newly allocated object.
4388       // Also, we cannot use store_heap_oop(to, val) because it uses r8 as tmp.
4389       if (UseCompressedOops) {
4390         __ encode_heap_oop(val);
4391         __ movl(to, val);
4392       } else {
4393         __ movptr(to, val);
4394       }
4395 
4396     } else {
4397       assert(is_java_primitive(bt), "unexpected basic type");
4398       assert_different_registers(rax, r_1->as_Register());
4399       size_t size_in_bytes = type2aelembytes(bt);
4400       __ store_sized_value(to, r_1->as_Register(), size_in_bytes);
4401     }
4402     j++;
4403   }
4404   assert(j == regs->length(), "missed a field?");
4405 
4406   __ ret(0);
4407 
4408   int unpack_fields_off = __ offset();
4409 
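       // Unpack stub: the mirror image of the pack loop above; each field is
       // loaded from the buffered value in rax back into its return register.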
4410   j = 1;
4411   for (int i = 0; i < sig_vk->length(); i++) {
4412     BasicType bt = sig_vk->at(i)._bt;
4413     if (bt == T_VALUETYPE) {
4414       continue;
4415     }
4416     if (bt == T_VOID) {
4417       if (sig_vk->at(i-1)._bt == T_LONG ||
4418           sig_vk->at(i-1)._bt == T_DOUBLE) {
4419         j++;
4420       }
4421       continue;
4422     }
4423     int off = sig_vk->at(i)._offset;
4424     assert(off > 0, "offset in object should be positive");
4425     VMRegPair pair = regs->at(j);
4426     VMReg r_1 = pair.first();
4427     VMReg r_2 = pair.second();
4428     Address from(rax, off);
4429     if (bt == T_FLOAT) {
4430       __ movflt(r_1->as_XMMRegister(), from);
4431     } else if (bt == T_DOUBLE) {
4432       __ movdbl(r_1->as_XMMRegister(), from);
4433     } else if (bt == T_OBJECT || bt == T_ARRAY) {
4434       assert_different_registers(rax, r_1->as_Register());
4435       __ load_heap_oop(r_1->as_Register(), from);
4436     } else {
4437       assert(is_java_primitive(bt), "unexpected basic type");
4438       assert_different_registers(rax, r_1->as_Register());
4439       size_t size_in_bytes = type2aelembytes(bt);
4440       __ load_sized_value(r_1->as_Register(), from, size_in_bytes, bt != T_CHAR && bt != T_BOOLEAN);
4441     }
4442     j++;
4443   }
4444   assert(j == regs->length(), "missed a field?");
4445 
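       // For stress testing, return a tagged klass pointer: rax gets the value's
       // klass with its low bit set, which appears to be the tag that tells the
       // caller the fields are being returned in registers rather than as an oop.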
4446   if (StressValueTypeReturnedAsFields) {
4447     __ load_klass(rax, rax);
4448     __ orptr(rax, 1);
4449   }
4450 
4451   __ ret(0);
4452 
4453   __ flush();
4454 
4455   return BufferedValueTypeBlob::create(&buffer, pack_fields_off, unpack_fields_off);
4456 }