src/cpu/x86/vm/methodHandles_x86.cpp
Index Unified diffs Context diffs Sdiffs Patch New Old Previous File Next File 7045514 Sdiff src/cpu/x86/vm

src/cpu/x86/vm/methodHandles_x86.cpp

Print this page




 122 }
 123 
 // Return the MethodTypeForm.vmlayout "cookie" for this ricochet frame's
 // saved arguments, recomputing it from the saved target's MethodType when
 // no cached value is available.
 //   read_cache:  if true, return the frame's cached layout when non-NULL.
 //   write_cache: if true, store the freshly computed cookie back into the frame.
 // NOTE(review): bare oop locals are held across accessor calls here —
 // presumably callers guarantee no safepoint/GC in between; confirm at call sites.
 124 oop MethodHandles::RicochetFrame::compute_saved_args_layout(bool read_cache, bool write_cache) {
 125   oop cookie = NULL;
 126   if (read_cache) {
 127     cookie = saved_args_layout();
       // Fast path: a non-NULL cached layout is returned unchanged.
 128     if (cookie != NULL)  return cookie;
 129   }
       // Slow path: walk saved target -> MethodType -> MethodTypeForm -> vmlayout.
 130   oop target = saved_target();
 131   oop mtype  = java_lang_invoke_MethodHandle::type(target);
 132   oop mtform = java_lang_invoke_MethodType::form(mtype);
 133   cookie = java_lang_invoke_MethodTypeForm::vmlayout(mtform);
 134   if (write_cache)  {
 135     (*saved_args_layout_addr()) = cookie;
 136   }
 137   return cookie;
 138 }
 139 
 140 void MethodHandles::RicochetFrame::generate_ricochet_blob(MacroAssembler* _masm,
 141                                                           // output params:
 142                                                           int* frame_size_in_words,
 143                                                           int* bounce_offset,
 144                                                           int* exception_offset) {

 145   (*frame_size_in_words) = RicochetFrame::frame_size_in_bytes() / wordSize;
 146 
 147   address start = __ pc();
 148 
 149 #ifdef ASSERT
 150   __ hlt(); __ hlt(); __ hlt();
 151   // here's a hint of something special:
 152   __ push(MAGIC_NUMBER_1);
 153   __ push(MAGIC_NUMBER_2);
 154 #endif //ASSERT
 155   __ hlt();  // not reached
 156 
 157   // A return PC has just been popped from the stack.
 158   // Return values are in registers.
 159   // The ebp points into the RicochetFrame, which contains
 160   // a cleanup continuation we must return to.
 161 
 162   (*bounce_offset) = __ pc() - start;
 163   BLOCK_COMMENT("ricochet_blob.bounce");
 164 


 349   __ load_unsigned_byte(reg, conversion_field_addr.plus_disp(offset));
 350   assert(CONV_VMINFO_MASK == right_n_bits(bits - shift), "else change type of previous load");
 351   assert(shift == 0, "no shift needed");
 352 }
 353 
 // Emit code that extracts the CONV_DEST_TYPE bitfield of an
 // AdapterMethodHandle conversion word into 'reg'.  Only the single byte
 // containing the field is loaded; the field is then shifted down into the
 // low bits of 'reg'.  The asserts pin the assumption that the field is
 // left-justified within that byte (so no masking is needed after the shift).
 354 void MethodHandles::load_conversion_dest_type(MacroAssembler* _masm, Register reg, Address conversion_field_addr) {
 355   int bits   = BitsPerByte;
       // Byte offset and in-byte bit position of the dest-type field.
 356   int offset = (CONV_DEST_TYPE_SHIFT / bits);
 357   int shift  = (CONV_DEST_TYPE_SHIFT % bits);
 358   __ load_unsigned_byte(reg, conversion_field_addr.plus_disp(offset));
 359   assert(CONV_TYPE_MASK == right_n_bits(bits - shift), "else change type of previous load");
       // Bring the field down to bit 0; high bits are already zero (byte load).
 360   __ shrl(reg, shift);
 361   DEBUG_ONLY(int conv_type_bits = (int) exact_log2(CONV_TYPE_MASK+1));
 362   assert((shift + conv_type_bits) == bits, "left justified in byte");
 363 }
 364 
 // Emit code that loads the CONV_STACK_MOVE bitfield (a signed value;
 // presumably a stack-slot count — confirm against CONV_STACK_MOVE users)
 // from the AdapterMethodHandle in rcx_amh into rdi_stack_move.
 //   might_be_negative: on LP64, additionally sign-extend the 32-bit value
 //   to 64 bits so negative moves are usable in pointer arithmetic.
 // Under VerifyMethodHandles the emitted code range-checks the value against
 // +/-0x4000 and stops the VM if it looks like garbage.
 365 void MethodHandles::load_stack_move(MacroAssembler* _masm,
 366                                     Register rdi_stack_move,
 367                                     Register rcx_amh,
 368                                     bool might_be_negative) {
 369   BLOCK_COMMENT("load_stack_move");
 370   Address rcx_amh_conversion(rcx_amh, java_lang_invoke_AdapterMethodHandle::conversion_offset_in_bytes());
 371   __ movl(rdi_stack_move, rcx_amh_conversion);
       // Arithmetic shift both positions the field at bit 0 and preserves its
       // sign within the low 32 bits.
 372   __ sarl(rdi_stack_move, CONV_STACK_MOVE_SHIFT);
 373 #ifdef _LP64
 374   if (might_be_negative) {
 375     // clean high bits of stack motion register (was loaded as an int)
 376     __ movslq(rdi_stack_move, rdi_stack_move);
 377   }
 378 #endif //_LP64
 379   if (VerifyMethodHandles) {
 380     Label L_ok, L_bad;
 381     int32_t stack_move_limit = 0x4000;  // extra-large
       // Fail if value >= +limit or value <= -limit.
 382     __ cmpptr(rdi_stack_move, stack_move_limit);
 383     __ jcc(Assembler::greaterEqual, L_bad);
 384     __ cmpptr(rdi_stack_move, -stack_move_limit);
 385     __ jcc(Assembler::greater, L_ok);
 386     __ bind(L_bad);
 387     __ stop("load_stack_move of garbage value");
 388     __ BIND(L_ok);
 389   }

 390 }
 391 
 392 #ifdef ASSERT
 // Debug-only cross-check: the RicochetFrame field offsets must agree with the
 // generic frame:: offset constants, so code using the generic offsets can
 // index into a ricochet frame (per the comment below, this struct must stay
 // "compatible" with class frame's layout).
 393 void MethodHandles::RicochetFrame::verify_offsets() {
 394   // Check compatibility of this struct with the more generally used offsets of class frame:
 395   int ebp_off = sender_link_offset_in_bytes();  // offset from struct base to local rbp value
 396   assert(ebp_off + wordSize*frame::interpreter_frame_method_offset      == saved_args_base_offset_in_bytes(), "");
 397   assert(ebp_off + wordSize*frame::interpreter_frame_last_sp_offset     == conversion_offset_in_bytes(), "");
 398   assert(ebp_off + wordSize*frame::interpreter_frame_sender_sp_offset   == exact_sender_sp_offset_in_bytes(), "");
 399   // These last two have to be exact:
 400   assert(ebp_off + wordSize*frame::link_offset                          == sender_link_offset_in_bytes(), "");
 401   assert(ebp_off + wordSize*frame::return_addr_offset                   == sender_pc_offset_in_bytes(), "");
 402 }
 403 
 404 void MethodHandles::RicochetFrame::verify() const {
 405   verify_offsets();
 406   assert(magic_number_1() == MAGIC_NUMBER_1, "");
 407   assert(magic_number_2() == MAGIC_NUMBER_2, "");
 408   if (!Universe::heap()->is_gc_active()) {
 409     if (saved_args_layout() != NULL) {


1130   Address vmarg;                // __ argument_address(vmargslot)
1131 
1132   const int java_mirror_offset = klassOopDesc::klass_part_offset_in_bytes() + Klass::java_mirror_offset_in_bytes();
1133 
1134   if (have_entry(ek)) {
1135     __ nop();                   // empty stubs make SG sick
1136     return;
1137   }
1138 
1139 #ifdef ASSERT
1140   __ push((int32_t) 0xEEEEEEEE);
1141   __ push((int32_t) (intptr_t) entry_name(ek));
1142   LP64_ONLY(__ push((int32_t) high((intptr_t) entry_name(ek))));
1143   __ push((int32_t) 0x33333333);
1144 #endif //ASSERT
1145 
1146   address interp_entry = __ pc();
1147 
1148   trace_method_handle(_masm, entry_name(ek));
1149 
1150   BLOCK_COMMENT(entry_name(ek));
1151 
1152   switch ((int) ek) {
1153   case _raise_exception:
1154     {
1155       // Not a real MH entry, but rather shared code for raising an
1156       // exception.  Since we use the compiled entry, arguments are
1157       // expected in compiler argument registers.
1158       assert(raise_exception_method(), "must be set");
1159       assert(raise_exception_method()->from_compiled_entry(), "method must be linked");
1160 
1161       const Register rdi_pc = rax;
1162       __ pop(rdi_pc);  // caller PC
1163       __ mov(rsp, saved_last_sp);  // cut the stack back to where the caller started
1164 
1165       Register rbx_method = rbx_temp;
1166       Label L_no_method;
1167       // FIXME: fill in _raise_exception_method with a suitable java.lang.invoke method
1168       __ movptr(rbx_method, ExternalAddress((address) &_raise_exception_method));
1169       __ testptr(rbx_method, rbx_method);
1170       __ jccb(Assembler::zero, L_no_method);


1275 
1276       __ bind(no_such_interface);
1277       // Throw an exception.
1278       // For historical reasons, it will be IncompatibleClassChangeError.
1279       __ mov(rbx_temp, rcx_recv);  // rarg2_required might be RCX
1280       assert_different_registers(rarg2_required, rbx_temp);
1281       __ movptr(rarg2_required, Address(rdx_intf, java_mirror_offset));  // required interface
1282       __ mov(   rarg1_actual,   rbx_temp);                               // bad receiver
1283       __ movl(  rarg0_code,     (int) Bytecodes::_invokeinterface);      // who is complaining?
1284       __ jump(ExternalAddress(from_interpreted_entry(_raise_exception)));
1285     }
1286     break;
1287 
1288   case _bound_ref_mh:
1289   case _bound_int_mh:
1290   case _bound_long_mh:
1291   case _bound_ref_direct_mh:
1292   case _bound_int_direct_mh:
1293   case _bound_long_direct_mh:
1294     {
1295       bool direct_to_method = (ek >= _bound_ref_direct_mh);
1296       BasicType arg_type  = ek_bound_mh_arg_type(ek);
1297       int       arg_slots = type2size[arg_type];
1298 
1299       // make room for the new argument:
1300       __ movl(rax_argslot, rcx_bmh_vmargslot);
1301       __ lea(rax_argslot, __ argument_address(rax_argslot));
1302 
1303       insert_arg_slots(_masm, arg_slots * stack_move_unit(), rax_argslot, rbx_temp, rdx_temp);
1304 
1305       // store bound argument into the new stack slot:
1306       __ load_heap_oop(rbx_temp, rcx_bmh_argument);
1307       if (arg_type == T_OBJECT) {
1308         __ movptr(Address(rax_argslot, 0), rbx_temp);
1309       } else {
1310         Address prim_value_addr(rbx_temp, java_lang_boxing_object::value_offset_in_bytes(arg_type));
1311         move_typed_arg(_masm, arg_type, false,
1312                        Address(rax_argslot, 0),
1313                        prim_value_addr,
1314                        rbx_temp, rdx_temp);
1315       }


1912         __ load_method_handle_vmslots(rbx_temp, rcx_recv, rdx_temp);
1913         Label L_count_ok;
1914         __ cmpl(rbx_temp, collect_count_constant);
1915         __ jcc(Assembler::equal, L_count_ok);
1916         __ stop("bad vminfo in AMH.conv");
1917         __ BIND(L_count_ok);
1918       }
1919 #endif //ASSERT
1920 
1921       // copy |collect| slots directly to TOS:
1922       push_arg_slots(_masm, rax_coll, collect_count, 0, rbx_temp, rdx_temp);
1923       // Now pushed:  ... keep1 | collect | keep2 | RF... | collect |
1924       // rax_coll still points at the trailing edge of |collect| and leading edge of |keep2|
1925 
1926       // If necessary, adjust the saved arguments to make room for the eventual return value.
1927       // Normal adjustment:  ... keep1 | +dest+ | -collect- | keep2 | RF... | collect |
1928       // If retaining args:  ... keep1 | +dest+ |  collect  | keep2 | RF... | collect |
1929       // In the non-retaining case, this might move keep2 either up or down.
1930       // We don't have to copy the whole | RF... collect | complex,
1931       // but we must adjust RF.saved_args_base.
1932       // Also, from now on, we will forget about the original copy of |collect|.
1933       // If we are retaining it, we will treat it as part of |keep2|.
1934       // For clarity we will define |keep3| = |collect|keep2| or |keep2|.
1935 
1936       BLOCK_COMMENT("adjust trailing arguments {");
1937       // Compare the sizes of |+dest+| and |-collect-|, which are opposed opening and closing movements.
1938       int                open_count  = dest_count;
1939       RegisterOrConstant close_count = collect_count_constant;
1940       Register rdi_close_count = rdi_collect_count;
1941       if (retain_original_args) {
1942         close_count = constant(0);
1943       } else if (collect_count_constant == -1) {
1944         close_count = rdi_collect_count;
1945       }
1946 
1947       // How many slots need moving?  This is simply dest_slot (0 => no |keep3|).
1948       RegisterOrConstant keep3_count;
1949       Register rsi_keep3_count = rsi;  // can repair from RF.exact_sender_sp
1950       if (dest_slot_constant >= 0) {
1951         keep3_count = dest_slot_constant;
1952       } else  {


1969       bool stomp_dest = (NOT_DEBUG(dest == T_OBJECT) DEBUG_ONLY(dest_count != 0));
1970       bool fix_arg_base = (!close_count.is_constant() || open_count != close_count.as_constant());
1971 
1972       if (stomp_dest | fix_arg_base) {
1973         // we will probably need an updated rax_argv value
1974         if (collect_slot_constant >= 0) {
1975           // rax_coll already holds the leading edge of |keep2|, so tweak it
1976           assert(rax_coll == rax_argv, "elided a move");
1977           if (collect_slot_constant != 0)
1978             __ subptr(rax_argv, collect_slot_constant * Interpreter::stackElementSize);
1979         } else {
1980           // Just reload from RF.saved_args_base.
1981           __ movptr(rax_argv, saved_args_base_addr);
1982         }
1983       }
1984 
1985       // Old and new argument locations (based at slot 0).
1986       // Net shift (&new_argv - &old_argv) is (close_count - open_count).
1987       bool zero_open_count = (open_count == 0);  // remember this bit of info
1988       if (move_keep3 && fix_arg_base) {
1989         // It will be easier to have everything in one register:
1990         if (close_count.is_register()) {
1991           // Deduct open_count from close_count register to get a clean +/- value.
1992           __ subptr(close_count.as_register(), open_count);
1993         } else {
1994           close_count = close_count.as_constant() - open_count;
1995         }
1996         open_count = 0;
1997       }
1998       Address old_argv(rax_argv, 0);
1999       Address new_argv(rax_argv, close_count,  Interpreter::stackElementScale(),
2000                                 - open_count * Interpreter::stackElementSize);
2001 
2002       // First decide if any actual data are to be moved.
2003       // We can skip if (a) |keep3| is empty, or (b) the argument list size didn't change.
2004       // (As it happens, all movements involve an argument list size change.)
2005 
2006       // If there are variable parameters, use dynamic checks to skip around the whole mess.
2007       Label L_done;
2008       if (!keep3_count.is_constant()) {
2009         __ testl(keep3_count.as_register(), keep3_count.as_register());


2379       __ movl(          rarg0_code,     (int) Bytecodes::_aaload);                      // who is complaining?
2380       __ jump(ExternalAddress(from_interpreted_entry(_raise_exception)));
2381 
2382       __ bind(bad_array_length);
2383       UNPUSH_RSI;
2384       assert(!vmarg.uses(rarg2_required), "must be different registers");
2385       __ mov(    rarg2_required, rcx_recv);                       // AMH requiring a certain length
2386       __ movptr( rarg1_actual,   vmarg);                          // bad array
2387       __ movl(   rarg0_code,     (int) Bytecodes::_arraylength);  // who is complaining?
2388       __ jump(ExternalAddress(from_interpreted_entry(_raise_exception)));
2389 #undef UNPUSH_RSI
2390 
2391       break;
2392     }
2393 
2394   default:
2395     // do not require all platforms to recognize all adapter types
2396     __ nop();
2397     return;
2398   }

2399   __ hlt();
2400 
2401   address me_cookie = MethodHandleEntry::start_compiled_entry(_masm, interp_entry);
2402   __ unimplemented(entry_name(ek)); // %%% FIXME: NYI
2403 
2404   init_entry(ek, MethodHandleEntry::finish_compiled_entry(_masm, me_cookie));
2405 }


 122 }
 123 
 // Return the MethodTypeForm.vmlayout "cookie" for this ricochet frame's
 // saved arguments, recomputing it from the saved target's MethodType when
 // no cached value is available.
 //   read_cache:  if true, return the frame's cached layout when non-NULL.
 //   write_cache: if true, store the freshly computed cookie back into the frame.
 // NOTE(review): bare oop locals are held across accessor calls here —
 // presumably callers guarantee no safepoint/GC in between; confirm at call sites.
 124 oop MethodHandles::RicochetFrame::compute_saved_args_layout(bool read_cache, bool write_cache) {
 125   oop cookie = NULL;
 126   if (read_cache) {
 127     cookie = saved_args_layout();
       // Fast path: a non-NULL cached layout is returned unchanged.
 128     if (cookie != NULL)  return cookie;
 129   }
       // Slow path: walk saved target -> MethodType -> MethodTypeForm -> vmlayout.
 130   oop target = saved_target();
 131   oop mtype  = java_lang_invoke_MethodHandle::type(target);
 132   oop mtform = java_lang_invoke_MethodType::form(mtype);
 133   cookie = java_lang_invoke_MethodTypeForm::vmlayout(mtform);
 134   if (write_cache)  {
 135     (*saved_args_layout_addr()) = cookie;
 136   }
 137   return cookie;
 138 }
 139 
 140 void MethodHandles::RicochetFrame::generate_ricochet_blob(MacroAssembler* _masm,
 141                                                           // output params:

 142                                                           int* bounce_offset,
 143                                                           int* exception_offset,
 144                                                           int* frame_size_in_words) {
 145   (*frame_size_in_words) = RicochetFrame::frame_size_in_bytes() / wordSize;
 146 
 147   address start = __ pc();
 148 
 149 #ifdef ASSERT
 150   __ hlt(); __ hlt(); __ hlt();
 151   // here's a hint of something special:
 152   __ push(MAGIC_NUMBER_1);
 153   __ push(MAGIC_NUMBER_2);
 154 #endif //ASSERT
 155   __ hlt();  // not reached
 156 
 157   // A return PC has just been popped from the stack.
 158   // Return values are in registers.
 159   // The ebp points into the RicochetFrame, which contains
 160   // a cleanup continuation we must return to.
 161 
 162   (*bounce_offset) = __ pc() - start;
 163   BLOCK_COMMENT("ricochet_blob.bounce");
 164 


 349   __ load_unsigned_byte(reg, conversion_field_addr.plus_disp(offset));
 350   assert(CONV_VMINFO_MASK == right_n_bits(bits - shift), "else change type of previous load");
 351   assert(shift == 0, "no shift needed");
 352 }
 353 
 // Emit code that extracts the CONV_DEST_TYPE bitfield of an
 // AdapterMethodHandle conversion word into 'reg'.  Only the single byte
 // containing the field is loaded; the field is then shifted down into the
 // low bits of 'reg'.  The asserts pin the assumption that the field is
 // left-justified within that byte (so no masking is needed after the shift).
 354 void MethodHandles::load_conversion_dest_type(MacroAssembler* _masm, Register reg, Address conversion_field_addr) {
 355   int bits   = BitsPerByte;
       // Byte offset and in-byte bit position of the dest-type field.
 356   int offset = (CONV_DEST_TYPE_SHIFT / bits);
 357   int shift  = (CONV_DEST_TYPE_SHIFT % bits);
 358   __ load_unsigned_byte(reg, conversion_field_addr.plus_disp(offset));
 359   assert(CONV_TYPE_MASK == right_n_bits(bits - shift), "else change type of previous load");
       // Bring the field down to bit 0; high bits are already zero (byte load).
 360   __ shrl(reg, shift);
 361   DEBUG_ONLY(int conv_type_bits = (int) exact_log2(CONV_TYPE_MASK+1));
 362   assert((shift + conv_type_bits) == bits, "left justified in byte");
 363 }
 364 
 // Emit code that loads the CONV_STACK_MOVE bitfield (a signed value;
 // presumably a stack-slot count — confirm against CONV_STACK_MOVE users)
 // from the AdapterMethodHandle in rcx_amh into rdi_stack_move.
 //   might_be_negative: on LP64, additionally sign-extend the 32-bit value
 //   to 64 bits so negative moves are usable in pointer arithmetic.
 // Under VerifyMethodHandles the emitted code range-checks the value against
 // +/-0x4000 and stops the VM if it looks like garbage.
 365 void MethodHandles::load_stack_move(MacroAssembler* _masm,
 366                                     Register rdi_stack_move,
 367                                     Register rcx_amh,
 368                                     bool might_be_negative) {
 369   BLOCK_COMMENT("load_stack_move {");
 370   Address rcx_amh_conversion(rcx_amh, java_lang_invoke_AdapterMethodHandle::conversion_offset_in_bytes());
 371   __ movl(rdi_stack_move, rcx_amh_conversion);
       // Arithmetic shift both positions the field at bit 0 and preserves its
       // sign within the low 32 bits.
 372   __ sarl(rdi_stack_move, CONV_STACK_MOVE_SHIFT);
 373 #ifdef _LP64
 374   if (might_be_negative) {
 375     // clean high bits of stack motion register (was loaded as an int)
 376     __ movslq(rdi_stack_move, rdi_stack_move);
 377   }
 378 #endif //_LP64
 379   if (VerifyMethodHandles) {
 380     Label L_ok, L_bad;
 381     int32_t stack_move_limit = 0x4000;  // extra-large
       // Fail if value >= +limit or value <= -limit.
 382     __ cmpptr(rdi_stack_move, stack_move_limit);
 383     __ jcc(Assembler::greaterEqual, L_bad);
 384     __ cmpptr(rdi_stack_move, -stack_move_limit);
 385     __ jcc(Assembler::greater, L_ok);
 386     __ bind(L_bad);
 387     __ stop("load_stack_move of garbage value");
 388     __ BIND(L_ok);
 389   }
 390   BLOCK_COMMENT("} load_stack_move");
 391 }
 392 
 393 #ifdef ASSERT
 // Debug-only cross-check: the RicochetFrame field offsets must agree with the
 // generic frame:: offset constants, so code using the generic offsets can
 // index into a ricochet frame (per the comment below, this struct must stay
 // "compatible" with class frame's layout).
 394 void MethodHandles::RicochetFrame::verify_offsets() {
 395   // Check compatibility of this struct with the more generally used offsets of class frame:
 396   int ebp_off = sender_link_offset_in_bytes();  // offset from struct base to local rbp value
 397   assert(ebp_off + wordSize*frame::interpreter_frame_method_offset      == saved_args_base_offset_in_bytes(), "");
 398   assert(ebp_off + wordSize*frame::interpreter_frame_last_sp_offset     == conversion_offset_in_bytes(), "");
 399   assert(ebp_off + wordSize*frame::interpreter_frame_sender_sp_offset   == exact_sender_sp_offset_in_bytes(), "");
 400   // These last two have to be exact:
 401   assert(ebp_off + wordSize*frame::link_offset                          == sender_link_offset_in_bytes(), "");
 402   assert(ebp_off + wordSize*frame::return_addr_offset                   == sender_pc_offset_in_bytes(), "");
 403 }
 404 
 405 void MethodHandles::RicochetFrame::verify() const {
 406   verify_offsets();
 407   assert(magic_number_1() == MAGIC_NUMBER_1, "");
 408   assert(magic_number_2() == MAGIC_NUMBER_2, "");
 409   if (!Universe::heap()->is_gc_active()) {
 410     if (saved_args_layout() != NULL) {


1131   Address vmarg;                // __ argument_address(vmargslot)
1132 
1133   const int java_mirror_offset = klassOopDesc::klass_part_offset_in_bytes() + Klass::java_mirror_offset_in_bytes();
1134 
1135   if (have_entry(ek)) {
1136     __ nop();                   // empty stubs make SG sick
1137     return;
1138   }
1139 
1140 #ifdef ASSERT
1141   __ push((int32_t) 0xEEEEEEEE);
1142   __ push((int32_t) (intptr_t) entry_name(ek));
1143   LP64_ONLY(__ push((int32_t) high((intptr_t) entry_name(ek))));
1144   __ push((int32_t) 0x33333333);
1145 #endif //ASSERT
1146 
1147   address interp_entry = __ pc();
1148 
1149   trace_method_handle(_masm, entry_name(ek));
1150 
1151   BLOCK_COMMENT(err_msg("Entry %s {", entry_name(ek)));
1152 
1153   switch ((int) ek) {
1154   case _raise_exception:
1155     {
1156       // Not a real MH entry, but rather shared code for raising an
1157       // exception.  Since we use the compiled entry, arguments are
1158       // expected in compiler argument registers.
1159       assert(raise_exception_method(), "must be set");
1160       assert(raise_exception_method()->from_compiled_entry(), "method must be linked");
1161 
1162       const Register rdi_pc = rax;
1163       __ pop(rdi_pc);  // caller PC
1164       __ mov(rsp, saved_last_sp);  // cut the stack back to where the caller started
1165 
1166       Register rbx_method = rbx_temp;
1167       Label L_no_method;
1168       // FIXME: fill in _raise_exception_method with a suitable java.lang.invoke method
1169       __ movptr(rbx_method, ExternalAddress((address) &_raise_exception_method));
1170       __ testptr(rbx_method, rbx_method);
1171       __ jccb(Assembler::zero, L_no_method);


1276 
1277       __ bind(no_such_interface);
1278       // Throw an exception.
1279       // For historical reasons, it will be IncompatibleClassChangeError.
1280       __ mov(rbx_temp, rcx_recv);  // rarg2_required might be RCX
1281       assert_different_registers(rarg2_required, rbx_temp);
1282       __ movptr(rarg2_required, Address(rdx_intf, java_mirror_offset));  // required interface
1283       __ mov(   rarg1_actual,   rbx_temp);                               // bad receiver
1284       __ movl(  rarg0_code,     (int) Bytecodes::_invokeinterface);      // who is complaining?
1285       __ jump(ExternalAddress(from_interpreted_entry(_raise_exception)));
1286     }
1287     break;
1288 
1289   case _bound_ref_mh:
1290   case _bound_int_mh:
1291   case _bound_long_mh:
1292   case _bound_ref_direct_mh:
1293   case _bound_int_direct_mh:
1294   case _bound_long_direct_mh:
1295     {
1296       const bool direct_to_method = (ek >= _bound_ref_direct_mh);
1297       BasicType arg_type  = ek_bound_mh_arg_type(ek);
1298       int       arg_slots = type2size[arg_type];
1299 
1300       // make room for the new argument:
1301       __ movl(rax_argslot, rcx_bmh_vmargslot);
1302       __ lea(rax_argslot, __ argument_address(rax_argslot));
1303 
1304       insert_arg_slots(_masm, arg_slots * stack_move_unit(), rax_argslot, rbx_temp, rdx_temp);
1305 
1306       // store bound argument into the new stack slot:
1307       __ load_heap_oop(rbx_temp, rcx_bmh_argument);
1308       if (arg_type == T_OBJECT) {
1309         __ movptr(Address(rax_argslot, 0), rbx_temp);
1310       } else {
1311         Address prim_value_addr(rbx_temp, java_lang_boxing_object::value_offset_in_bytes(arg_type));
1312         move_typed_arg(_masm, arg_type, false,
1313                        Address(rax_argslot, 0),
1314                        prim_value_addr,
1315                        rbx_temp, rdx_temp);
1316       }


1913         __ load_method_handle_vmslots(rbx_temp, rcx_recv, rdx_temp);
1914         Label L_count_ok;
1915         __ cmpl(rbx_temp, collect_count_constant);
1916         __ jcc(Assembler::equal, L_count_ok);
1917         __ stop("bad vminfo in AMH.conv");
1918         __ BIND(L_count_ok);
1919       }
1920 #endif //ASSERT
1921 
1922       // copy |collect| slots directly to TOS:
1923       push_arg_slots(_masm, rax_coll, collect_count, 0, rbx_temp, rdx_temp);
1924       // Now pushed:  ... keep1 | collect | keep2 | RF... | collect |
1925       // rax_coll still points at the trailing edge of |collect| and leading edge of |keep2|
1926 
1927       // If necessary, adjust the saved arguments to make room for the eventual return value.
1928       // Normal adjustment:  ... keep1 | +dest+ | -collect- | keep2 | RF... | collect |
1929       // If retaining args:  ... keep1 | +dest+ |  collect  | keep2 | RF... | collect |
1930       // In the non-retaining case, this might move keep2 either up or down.
1931       // We don't have to copy the whole | RF... collect | complex,
1932       // but we must adjust RF.saved_args_base.
1933       // Also, from now on, we will forget about the original copy of |collect|.
1934       // If we are retaining it, we will treat it as part of |keep2|.
1935       // For clarity we will define |keep3| = |collect|keep2| or |keep2|.
1936 
1937       BLOCK_COMMENT("adjust trailing arguments {");
1938       // Compare the sizes of |+dest+| and |-collect-|, which are opposed opening and closing movements.
1939       int                open_count  = dest_count;
1940       RegisterOrConstant close_count = collect_count_constant;
1941       Register rdi_close_count = rdi_collect_count;
1942       if (retain_original_args) {
1943         close_count = constant(0);
1944       } else if (collect_count_constant == -1) {
1945         close_count = rdi_collect_count;
1946       }
1947 
1948       // How many slots need moving?  This is simply dest_slot (0 => no |keep3|).
1949       RegisterOrConstant keep3_count;
1950       Register rsi_keep3_count = rsi;  // can repair from RF.exact_sender_sp
1951       if (dest_slot_constant >= 0) {
1952         keep3_count = dest_slot_constant;
1953       } else  {


1970       bool stomp_dest = (NOT_DEBUG(dest == T_OBJECT) DEBUG_ONLY(dest_count != 0));
1971       bool fix_arg_base = (!close_count.is_constant() || open_count != close_count.as_constant());
1972 
1973       if (stomp_dest | fix_arg_base) {
1974         // we will probably need an updated rax_argv value
1975         if (collect_slot_constant >= 0) {
1976           // rax_coll already holds the leading edge of |keep2|, so tweak it
1977           assert(rax_coll == rax_argv, "elided a move");
1978           if (collect_slot_constant != 0)
1979             __ subptr(rax_argv, collect_slot_constant * Interpreter::stackElementSize);
1980         } else {
1981           // Just reload from RF.saved_args_base.
1982           __ movptr(rax_argv, saved_args_base_addr);
1983         }
1984       }
1985 
1986       // Old and new argument locations (based at slot 0).
1987       // Net shift (&new_argv - &old_argv) is (close_count - open_count).
1988       bool zero_open_count = (open_count == 0);  // remember this bit of info
1989       if (move_keep3 && fix_arg_base) {
1990         // It will be easier to have everything in one register:
1991         if (close_count.is_register()) {
1992           // Deduct open_count from close_count register to get a clean +/- value.
1993           __ subptr(close_count.as_register(), open_count);
1994         } else {
1995           close_count = close_count.as_constant() - open_count;
1996         }
1997         open_count = 0;
1998       }
1999       Address old_argv(rax_argv, 0);
2000       Address new_argv(rax_argv, close_count,  Interpreter::stackElementScale(),
2001                                 - open_count * Interpreter::stackElementSize);
2002 
2003       // First decide if any actual data are to be moved.
2004       // We can skip if (a) |keep3| is empty, or (b) the argument list size didn't change.
2005       // (As it happens, all movements involve an argument list size change.)
2006 
2007       // If there are variable parameters, use dynamic checks to skip around the whole mess.
2008       Label L_done;
2009       if (!keep3_count.is_constant()) {
2010         __ testl(keep3_count.as_register(), keep3_count.as_register());


2380       __ movl(          rarg0_code,     (int) Bytecodes::_aaload);                      // who is complaining?
2381       __ jump(ExternalAddress(from_interpreted_entry(_raise_exception)));
2382 
2383       __ bind(bad_array_length);
2384       UNPUSH_RSI;
2385       assert(!vmarg.uses(rarg2_required), "must be different registers");
2386       __ mov(    rarg2_required, rcx_recv);                       // AMH requiring a certain length
2387       __ movptr( rarg1_actual,   vmarg);                          // bad array
2388       __ movl(   rarg0_code,     (int) Bytecodes::_arraylength);  // who is complaining?
2389       __ jump(ExternalAddress(from_interpreted_entry(_raise_exception)));
2390 #undef UNPUSH_RSI
2391 
2392       break;
2393     }
2394 
2395   default:
2396     // do not require all platforms to recognize all adapter types
2397     __ nop();
2398     return;
2399   }
2400   BLOCK_COMMENT(err_msg("} Entry %s", entry_name(ek)));
2401   __ hlt();
2402 
2403   address me_cookie = MethodHandleEntry::start_compiled_entry(_masm, interp_entry);
2404   __ unimplemented(entry_name(ek)); // %%% FIXME: NYI
2405 
2406   init_entry(ek, MethodHandleEntry::finish_compiled_entry(_masm, me_cookie));
2407 }
src/cpu/x86/vm/methodHandles_x86.cpp
Index Unified diffs Context diffs Sdiffs Patch New Old Previous File Next File