rev 1071 : 6829192: JSR 292 needs to support 64-bit x86
Summary: changes for method handles and invokedynamic
Reviewed-by: ?, ?
rev 1072 : [mq]: meth.walker.patch
--- old/src/cpu/x86/vm/methodHandles_x86.cpp
+++ new/src/cpu/x86/vm/methodHandles_x86.cpp
1 1 /*
2 2 * Copyright 1997-2009 Sun Microsystems, Inc. All Rights Reserved.
3 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 4 *
5 5 * This code is free software; you can redistribute it and/or modify it
6 6 * under the terms of the GNU General Public License version 2 only, as
7 7 * published by the Free Software Foundation.
8 8 *
9 9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 12 * version 2 for more details (a copy is included in the LICENSE file that
13 13 * accompanied this code).
14 14 *
15 15 * You should have received a copy of the GNU General Public License version
16 16 * 2 along with this work; if not, write to the Free Software Foundation,
17 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 18 *
19 19 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
20 20 * CA 95054 USA or visit www.sun.com if you need additional information or
21 21 * have any questions.
22 22 *
23 23 */
24 24
25 25 #include "incls/_precompiled.incl"
26 26 #include "incls/_methodHandles_x86.cpp.incl"
27 27
28 28 #define __ _masm->
29 29
30 30 address MethodHandleEntry::start_compiled_entry(MacroAssembler* _masm,
31 31 address interpreted_entry) {
32 32 // Just before the actual machine code entry point, allocate space
33 33 // for a MethodHandleEntry::Data record, so that we can manage everything
34 34 // from one base pointer.
35 35 __ align(wordSize);
36 36 address target = __ pc() + sizeof(Data);
37 37 while (__ pc() < target) {
38 38 __ nop();
39 39 __ align(wordSize);
40 40 }
41 41
42 42 MethodHandleEntry* me = (MethodHandleEntry*) __ pc();
43 43 me->set_end_address(__ pc()); // set a temporary end_address
44 44 me->set_from_interpreted_entry(interpreted_entry);
45 45 me->set_type_checking_entry(NULL);
46 46
47 47 return (address) me;
48 48 }
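Note: the nop/align loop above reserves sizeof(Data) bytes in the code buffer immediately before the first real instruction, so a single base pointer reaches both the metadata record and its code. A hedged C++ sketch of the resulting layout (field names are illustrative stand-ins, not the real MethodHandleEntry::Data):

    // One contiguous region in the code buffer:
    //   | Data record | machine code ... |
    //   ^ me          ^ me + sizeof(Data)  <- the actual entry point
    struct EntryData {                  // stand-in for MethodHandleEntry::Data
      void* end_address;                // patched later by finish_compiled_entry
      void* from_interpreted_entry;     // how the interpreter calls in
      void* type_checking_entry;        // optional type-check prologue, or NULL
    };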
49 49
50 50 MethodHandleEntry* MethodHandleEntry::finish_compiled_entry(MacroAssembler* _masm,
51 51 address start_addr) {
52 52 MethodHandleEntry* me = (MethodHandleEntry*) start_addr;
53 53 assert(me->end_address() == start_addr, "valid ME");
54 54
55 55 // Fill in the real end_address:
56 56 __ align(wordSize);
57 57 me->set_end_address(__ pc());
58 58
59 59 return me;
60 60 }
61 61
62 62 #ifdef ASSERT
63 63 static void verify_argslot(MacroAssembler* _masm, Register rax_argslot,
64 64 const char* error_message) {
65 65 // Verify that argslot lies within (rsp, rbp].
66 66 Label L_ok, L_bad;
67 67 __ cmpptr(rax_argslot, rbp);
68 68 __ jcc(Assembler::above, L_bad);
69 69 __ cmpptr(rsp, rax_argslot);
70 70 __ jcc(Assembler::below, L_ok);
71 71 __ bind(L_bad);
72 72 __ stop(error_message);
73 73 __ bind(L_ok);
74 74 }
75 75 #endif
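Note: verify_argslot encodes the frame invariant that a live argument slot lies strictly above the stack pointer and at or below the frame pointer. The same predicate as a minimal C++ sketch (plain integers standing in for rsp/rbp/argslot):

    #include <cstdint>
    // Stack grows downward: rsp is the lowest live address, rbp the frame base.
    // Mirrors the two cmpptr/jcc checks above: argslot must lie in (rsp, rbp].
    static bool argslot_in_frame(intptr_t rsp, intptr_t rbp, intptr_t argslot) {
      return rsp < argslot && argslot <= rbp;
    }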
76 76
77 77
78 78 // Code generation
79 79 address MethodHandles::generate_method_handle_interpreter_entry(MacroAssembler* _masm) {
80 80 // rbx: methodOop
81 81 // rcx: receiver method handle (must load from sp[MethodTypeForm.vmslots])
82 82 // rsi/r13: sender SP (must preserve; see prepare_to_jump_from_interpreted)
83 83 // rdx: garbage temp, blown away
84 84
85 85 Register rbx_method = rbx;
86 86 Register rcx_recv = rcx;
87 87 Register rax_mtype = rax;
88 88 Register rdx_temp = rdx;
89 89
90 90 // emit WrongMethodType path first, to enable jccb back-branch from main path
91 91 Label wrong_method_type;
92 92 __ bind(wrong_method_type);
93 93 __ push(rax_mtype); // required mtype
94 94 __ push(rcx_recv); // bad mh (1st stacked argument)
95 95 __ jump(ExternalAddress(Interpreter::throw_WrongMethodType_entry()));
96 96
97 97 // here's where control starts out:
98 98 __ align(CodeEntryAlignment);
99 99 address entry_point = __ pc();
100 100
101 101 // fetch the MethodType from the method handle into rax (the 'check' register)
102 102 {
103 103 Register tem = rbx_method;
104 104 for (jint* pchase = methodOopDesc::method_type_offsets_chain(); (*pchase) != -1; pchase++) {
105 105 __ movptr(rax_mtype, Address(tem, *pchase));
106 106 tem = rax_mtype; // in case there is another indirection
107 107 }
108 108 }
109 109 Register rbx_temp = rbx_method; // done with incoming methodOop
110 110
111 111 // given the MethodType, find out where the MH argument is buried
112 112 __ movptr(rdx_temp, Address(rax_mtype,
113 113 __ delayed_value(java_dyn_MethodType::form_offset_in_bytes, rbx_temp)));
114 114 __ movl(rdx_temp, Address(rdx_temp,
115 115 __ delayed_value(java_dyn_MethodTypeForm::vmslots_offset_in_bytes, rbx_temp)));
116 116 __ movptr(rcx_recv, __ argument_address(rdx_temp));
117 117
118 118 __ check_method_handle_type(rax_mtype, rcx_recv, rdx_temp, wrong_method_type);
119 119 __ jump_to_method_handle_entry(rcx_recv, rdx_temp);
120 120
121 121 return entry_point;
122 122 }
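Note: the pchase loop above is a data-driven pointer walk: method_type_offsets_chain() yields a -1-terminated list of byte offsets, and each step loads the next link, ending at the MethodType. A self-contained C++ sketch of the idiom (the offset table itself is hypothetical):

    // Follow a -1-terminated chain of byte offsets, one pointer load per
    // entry, exactly as the movptr loop does in the generated code.
    static void* walk_offset_chain(void* obj, const int* offsets) {
      char* p = static_cast<char*>(obj);
      for (; *offsets != -1; offsets++)
        p = *reinterpret_cast<char**>(p + *offsets);
      return p;
    }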
123 123
124 124 // Helper to insert argument slots into the stack.
125 125 // arg_slots must be a multiple of stack_move_unit() and <= 0
126 126 void MethodHandles::insert_arg_slots(MacroAssembler* _masm,
127 127 RegisterOrConstant arg_slots,
128 128 int arg_mask,
129 129 Register rax_argslot,
130 130 Register rbx_temp, Register rdx_temp) {
131 131 assert_different_registers(rax_argslot, rbx_temp, rdx_temp,
132 132 (!arg_slots.is_register() ? rsp : arg_slots.as_register()));
133 133
134 134 #ifdef ASSERT
135 135 verify_argslot(_masm, rax_argslot, "insertion point must fall within current frame");
136 136 if (arg_slots.is_register()) {
137 137 Label L_ok, L_bad;
138 138 __ cmpptr(arg_slots.as_register(), (int32_t) NULL_WORD);
139 139 __ jcc(Assembler::greater, L_bad);
140 140 __ testl(arg_slots.as_register(), -stack_move_unit() - 1);
141 141 __ jcc(Assembler::zero, L_ok);
142 142 __ bind(L_bad);
143 143 __ stop("assert arg_slots <= 0 and clear low bits");
144 144 __ bind(L_ok);
145 145 } else {
146 146 assert(arg_slots.as_constant() <= 0, "");
147 147 assert(arg_slots.as_constant() % -stack_move_unit() == 0, "");
148 148 }
149 149 #endif //ASSERT
150 150
151 151 #ifdef _LP64
152 152 if (arg_slots.is_register()) {
153 153 // clean high bits of stack motion register (was loaded as an int)
154 154 __ movslq(arg_slots.as_register(), arg_slots.as_register());
155 155 }
156 156 #endif
157 157
158 158 // Make space on the stack for the inserted argument(s).
159 159 // Then pull down everything shallower than rax_argslot.
160 160 // The stacked return address gets pulled down with everything else.
161 161 // That is, copy [rsp, argslot) downward by -size words. In pseudo-code:
162 162 // rsp -= size;
163 163 // for (rdx = rsp + size; rdx < argslot; rdx++)
164 164 // rdx[-size] = rdx[0]
165 165 // argslot -= size;
166 166 __ mov(rdx_temp, rsp); // source pointer for copy
167 167 __ lea(rsp, Address(rsp, arg_slots, Address::times_ptr));
168 168 {
169 169 Label loop;
170 170 __ bind(loop);
171 171 // pull one word down each time through the loop
172 172 __ movptr(rbx_temp, Address(rdx_temp, 0));
173 173 __ movptr(Address(rdx_temp, arg_slots, Address::times_ptr), rbx_temp);
174 174 __ addptr(rdx_temp, wordSize);
175 175 __ cmpptr(rdx_temp, rax_argslot);
176 176 __ jcc(Assembler::less, loop);
177 177 }
178 178
179 179 // Now move the argslot down, to point to the opened-up space.
180 180 __ lea(rax_argslot, Address(rax_argslot, arg_slots, Address::times_ptr));
181 181
182 182 if (TaggedStackInterpreter && arg_mask != _INSERT_NO_MASK) {
183 183 // The caller has specified a bitmask of tags to put into the opened space.
184 184 // This only works when the arg_slots value is an assembly-time constant.
185 185 int constant_arg_slots = arg_slots.as_constant() / stack_move_unit();
186 186 int tag_offset = Interpreter::tag_offset_in_bytes() - Interpreter::value_offset_in_bytes();
187 187 for (int slot = 0; slot < constant_arg_slots; slot++) {
188 188 BasicType slot_type = ((arg_mask & (1 << slot)) == 0 ? T_OBJECT : T_INT);
189 189 int slot_offset = Interpreter::stackElementSize() * slot;
190 190 Address tag_addr(rax_argslot, slot_offset + tag_offset);
191 191 __ movptr(tag_addr, frame::tag_for_basic_type(slot_type));
192 192 }
193 193 // Note that the new argument slots are tagged properly but contain
194 194 // garbage at this point. The value portions must be initialized
195 195 // by the caller. (Especially references!)
196 196 }
197 197 }
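Note: the copy loop above is the standard downward shuffle for opening a hole in a downward-growing stack; the stacked return address at [rsp] moves with everything else. A runnable C++ sketch of the commented pseudo-code (word-sized slots, tags ignored, 'size' positive):

    #include <cstddef>
    #include <cstdint>
    // Open 'size' free words just below 'argslot': slide [sp, argslot)
    // down by 'size' words, then lower both pointers.
    static void insert_slots(intptr_t*& sp, intptr_t*& argslot, std::ptrdiff_t size) {
      for (intptr_t* p = sp; p < argslot; p++)
        p[-size] = p[0];              // pull one word down
      sp      -= size;
      argslot -= size;                // now points at the opened-up space
    }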
198 198
199 199 // Helper to remove argument slots from the stack.
200 200 // arg_slots must be a multiple of stack_move_unit() and >= 0
201 201 void MethodHandles::remove_arg_slots(MacroAssembler* _masm,
202 202 RegisterOrConstant arg_slots,
203 203 Register rax_argslot,
204 204 Register rbx_temp, Register rdx_temp) {
205 205 assert_different_registers(rax_argslot, rbx_temp, rdx_temp,
206 206 (!arg_slots.is_register() ? rsp : arg_slots.as_register()));
207 207
208 208 #ifdef ASSERT
209 209 {
210 210 // Verify that [argslot..argslot+size) lies within (rsp, rbp).
211 211 Label L_ok, L_bad;
212 212 __ lea(rbx_temp, Address(rax_argslot, arg_slots, Address::times_ptr));
213 213 __ cmpptr(rbx_temp, rbp);
214 214 __ jcc(Assembler::above, L_bad);
215 215 __ cmpptr(rsp, rax_argslot);
216 216 __ jcc(Assembler::below, L_ok);
217 217 __ bind(L_bad);
218 218 __ stop("deleted argument(s) must fall within current frame");
219 219 __ bind(L_ok);
220 220 }
221 221 if (arg_slots.is_register()) {
222 222 Label L_ok, L_bad;
223 223 __ cmpptr(arg_slots.as_register(), (int32_t) NULL_WORD);
224 224 __ jcc(Assembler::less, L_bad);
225 225 __ testl(arg_slots.as_register(), -stack_move_unit() - 1);
226 226 __ jcc(Assembler::zero, L_ok);
227 227 __ bind(L_bad);
228 228 __ stop("assert arg_slots >= 0 and clear low bits");
229 229 __ bind(L_ok);
230 230 } else {
231 231 assert(arg_slots.as_constant() >= 0, "");
232 232 assert(arg_slots.as_constant() % -stack_move_unit() == 0, "");
233 233 }
234 234 #endif //ASSERT
235 235
236 236 #ifdef _LP64
237 237 if (false) { // not needed, since register is positive
238 238 // clean high bits of stack motion register (was loaded as an int)
239 239 if (arg_slots.is_register())
240 240 __ movslq(arg_slots.as_register(), arg_slots.as_register());
241 241 }
242 242 #endif
243 243
244 244 // Pull up everything shallower than rax_argslot.
245 245 // Then remove the excess space on the stack.
246 246 // The stacked return address gets pulled up with everything else.
247 247 // That is, copy [rsp, argslot) upward by size words. In pseudo-code:
248 248 // for (rdx = argslot-1; rdx >= rsp; --rdx)
249 249 // rdx[size] = rdx[0]
250 250 // argslot += size;
251 251 // rsp += size;
252 252 __ lea(rdx_temp, Address(rax_argslot, -wordSize)); // source pointer for copy
253 253 {
254 254 Label loop;
255 255 __ bind(loop);
256 256 // pull one word up each time through the loop
257 257 __ movptr(rbx_temp, Address(rdx_temp, 0));
258 258 __ movptr(Address(rdx_temp, arg_slots, Address::times_ptr), rbx_temp);
259 259 __ addptr(rdx_temp, -wordSize);
260 260 __ cmpptr(rdx_temp, rsp);
261 261 __ jcc(Assembler::greaterEqual, loop);
262 262 }
263 263
264 264 // Now move the argslot up, to point to the just-copied block.
265 265 __ lea(rsp, Address(rsp, arg_slots, Address::times_ptr));
266 266 // And adjust the argslot address to point at the deletion point.
267 267 __ lea(rax_argslot, Address(rax_argslot, arg_slots, Address::times_ptr));
268 268 }
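Note: the removal loop runs from the word just below argslot back toward rsp, highest address first, so no word is overwritten before it has been copied. The symmetric C++ sketch (same simplifying assumptions as the insertion sketch):

    #include <cstddef>
    #include <cstdint>
    // Delete 'size' words at 'argslot': slide [sp, argslot) up by 'size'
    // words, deepest (highest-address) word first, then raise both pointers.
    static void remove_slots(intptr_t*& sp, intptr_t*& argslot, std::ptrdiff_t size) {
      for (intptr_t* p = argslot - 1; p >= sp; p--)
        p[size] = p[0];               // pull one word up
      sp      += size;
      argslot += size;                // now points at the deletion point
    }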
269 269
270 270 #ifndef PRODUCT
271 +extern "C" void print_method_handle(oopDesc* mh);
271 272 void trace_method_handle_stub(const char* adaptername,
272 273 oopDesc* mh,
273 274 intptr_t* entry_sp,
274 275 intptr_t* saved_sp,
275 276 intptr_t* saved_bp) {
276 277 // called as a leaf from native code: do not block the JVM!
277 278 intptr_t* last_sp = (intptr_t*) saved_bp[frame::interpreter_frame_last_sp_offset];
278 279 intptr_t* base_sp = (intptr_t*) saved_bp[frame::interpreter_frame_monitor_block_top_offset];
279 280 printf("MH %s mh="INTPTR_FORMAT" sp=("INTPTR_FORMAT"+"INTX_FORMAT") stack_size="INTX_FORMAT" bp="INTPTR_FORMAT"\n",
280 281 adaptername, (intptr_t)mh, (intptr_t)entry_sp, (intptr_t)(saved_sp - entry_sp), (intptr_t)(base_sp - last_sp), (intptr_t)saved_bp);
281 282 if (last_sp != saved_sp)
282 283 printf("*** last_sp="INTPTR_FORMAT"\n", (intptr_t)last_sp);
284 + if (Verbose) print_method_handle(mh);
283 285 }
284 286 #endif //PRODUCT
285 287
286 288 // Generate an "entry" field for a method handle.
287 289 // This determines how the method handle will respond to calls.
288 290 void MethodHandles::generate_method_handle_stub(MacroAssembler* _masm, MethodHandles::EntryKind ek) {
289 291 // Here is the register state during an interpreted call,
290 292 // as set up by generate_method_handle_interpreter_entry():
291 293 // - rbx: garbage temp (was MethodHandle.invoke methodOop, unused)
292 294 // - rcx: receiver method handle
293 295 // - rax: method handle type (only used by the check_mtype entry point)
294 296 // - rsi/r13: sender SP (must preserve; see prepare_to_jump_from_interpreted)
295 297 // - rdx: garbage temp, can blow away
296 298
297 299 Register rcx_recv = rcx;
298 300 Register rax_argslot = rax;
299 301 Register rbx_temp = rbx;
300 302 Register rdx_temp = rdx;
301 303
302 304 // This guy is set up by prepare_to_jump_from_interpreted (from interpreted calls)
303 305 // and gen_c2i_adapter (from compiled calls):
304 306 Register saved_last_sp = LP64_ONLY(r13) NOT_LP64(rsi);
305 307
306 308 guarantee(java_dyn_MethodHandle::vmentry_offset_in_bytes() != 0, "must have offsets");
307 309
308 310 // some handy addresses
309 311 Address rbx_method_fie( rbx, methodOopDesc::from_interpreted_offset() );
310 312
311 313 Address rcx_mh_vmtarget( rcx_recv, java_dyn_MethodHandle::vmtarget_offset_in_bytes() );
312 314 Address rcx_dmh_vmindex( rcx_recv, sun_dyn_DirectMethodHandle::vmindex_offset_in_bytes() );
313 315
314 316 Address rcx_bmh_vmargslot( rcx_recv, sun_dyn_BoundMethodHandle::vmargslot_offset_in_bytes() );
315 317 Address rcx_bmh_argument( rcx_recv, sun_dyn_BoundMethodHandle::argument_offset_in_bytes() );
316 318
317 319 Address rcx_amh_vmargslot( rcx_recv, sun_dyn_AdapterMethodHandle::vmargslot_offset_in_bytes() );
318 320 Address rcx_amh_argument( rcx_recv, sun_dyn_AdapterMethodHandle::argument_offset_in_bytes() );
319 321 Address rcx_amh_conversion( rcx_recv, sun_dyn_AdapterMethodHandle::conversion_offset_in_bytes() );
320 322 Address vmarg; // __ argument_address(vmargslot)
321 323
322 324 int tag_offset = -1;
323 325 if (TaggedStackInterpreter) {
324 326 tag_offset = Interpreter::tag_offset_in_bytes() - Interpreter::value_offset_in_bytes();
325 327     assert(tag_offset == wordSize, "stack grows as expected");
326 328 }
327 329
328 330 const int java_mirror_offset = klassOopDesc::klass_part_offset_in_bytes() + Klass::java_mirror_offset_in_bytes();
329 331
330 332 if (have_entry(ek)) {
331 333 __ nop(); // empty stubs make SG sick
332 334 return;
333 335 }
334 336
335 337 address interp_entry = __ pc();
336 338 if (UseCompressedOops) __ unimplemented("UseCompressedOops");
337 339
338 340 #ifndef PRODUCT
339 341 if (TraceMethodHandles) {
340 342 __ push(rax); __ push(rbx); __ push(rcx); __ push(rdx); __ push(rsi); __ push(rdi);
341 343 __ lea(rax, Address(rsp, wordSize*6)); // entry_sp
342 344 // arguments:
343 345 __ push(rbp); // interpreter frame pointer
344 346 __ push(rsi); // saved_sp
345 347 __ push(rax); // entry_sp
346 348 __ push(rcx); // mh
347 349 __ push(rcx);
348 350 __ movptr(Address(rsp, 0), (intptr_t)entry_name(ek));
349 351 __ call_VM_leaf(CAST_FROM_FN_PTR(address, trace_method_handle_stub), 5);
350 352 __ pop(rdi); __ pop(rsi); __ pop(rdx); __ pop(rcx); __ pop(rbx); __ pop(rax);
351 353 }
352 354 #endif //PRODUCT
353 355
354 356 switch ((int) ek) {
355 357 case _raise_exception:
356 358 {
357 359 // Not a real MH entry, but rather shared code for raising an exception.
358 360 // Extra local arguments are pushed on stack, as required type at TOS+8,
359 361 // failing object (or NULL) at TOS+4, failing bytecode type at TOS.
360 362     // Beyond those local arguments is the PC, of course.
361 363 Register rdx_code = rdx_temp;
362 364 Register rcx_fail = rcx_recv;
363 365 Register rax_want = rax_argslot;
364 366 Register rdi_pc = rdi;
365 367 __ pop(rdx_code); // TOS+0
366 368 __ pop(rcx_fail); // TOS+4
367 369 __ pop(rax_want); // TOS+8
368 370 __ pop(rdi_pc); // caller PC
369 371
370 372 __ mov(rsp, rsi); // cut the stack back to where the caller started
371 373
372 374 // Repush the arguments as if coming from the interpreter.
373 375 if (TaggedStackInterpreter) __ push(frame::tag_for_basic_type(T_INT));
374 376 __ push(rdx_code);
375 377 if (TaggedStackInterpreter) __ push(frame::tag_for_basic_type(T_OBJECT));
376 378 __ push(rcx_fail);
377 379 if (TaggedStackInterpreter) __ push(frame::tag_for_basic_type(T_OBJECT));
378 380 __ push(rax_want);
379 381
380 382 Register rbx_method = rbx_temp;
381 383 Label no_method;
382 384 // FIXME: fill in _raise_exception_method with a suitable sun.dyn method
383 385 __ movptr(rbx_method, ExternalAddress((address) &_raise_exception_method));
384 386 __ testptr(rbx_method, rbx_method);
385 387 __ jcc(Assembler::zero, no_method);
386 388 int jobject_oop_offset = 0;
387 389 __ movptr(rbx_method, Address(rbx_method, jobject_oop_offset)); // dereference the jobject
388 390 __ testptr(rbx_method, rbx_method);
389 391 __ jcc(Assembler::zero, no_method);
390 392 __ verify_oop(rbx_method);
391 393 __ push(rdi_pc); // and restore caller PC
392 394 __ jmp(rbx_method_fie);
393 395
394 396 // If we get here, the Java runtime did not do its job of creating the exception.
395 397     // Do something that at least causes a valid throw from the interpreter.
396 398 __ bind(no_method);
397 399 __ pop(rax_want);
398 400 if (TaggedStackInterpreter) __ pop(rcx_fail);
399 401 __ pop(rcx_fail);
400 402 __ push(rax_want);
401 403 __ push(rcx_fail);
402 404 __ jump(ExternalAddress(Interpreter::throw_WrongMethodType_entry()));
403 405 }
404 406 break;
405 407
406 408 case _invokestatic_mh:
407 409 case _invokespecial_mh:
408 410 {
409 411 Register rbx_method = rbx_temp;
410 412 __ movptr(rbx_method, rcx_mh_vmtarget); // target is a methodOop
411 413 __ verify_oop(rbx_method);
412 414 // same as TemplateTable::invokestatic or invokespecial,
413 415 // minus the CP setup and profiling:
414 416 if (ek == _invokespecial_mh) {
415 417 // Must load & check the first argument before entering the target method.
416 418 __ load_method_handle_vmslots(rax_argslot, rcx_recv, rdx_temp);
417 419 __ movptr(rcx_recv, __ argument_address(rax_argslot, -1));
418 420 __ null_check(rcx_recv);
419 421 __ verify_oop(rcx_recv);
420 422 }
421 423 __ jmp(rbx_method_fie);
422 424 }
423 425 break;
424 426
425 427 case _invokevirtual_mh:
426 428 {
427 429 // same as TemplateTable::invokevirtual,
428 430 // minus the CP setup and profiling:
429 431
430 432 // pick out the vtable index and receiver offset from the MH,
431 433 // and then we can discard it:
432 434 __ load_method_handle_vmslots(rax_argslot, rcx_recv, rdx_temp);
433 435 Register rbx_index = rbx_temp;
434 436 __ movl(rbx_index, rcx_dmh_vmindex);
435 437 // Note: The verifier allows us to ignore rcx_mh_vmtarget.
436 438 __ movptr(rcx_recv, __ argument_address(rax_argslot, -1));
437 439 __ null_check(rcx_recv, oopDesc::klass_offset_in_bytes());
438 440
439 441 // get receiver klass
440 442 Register rax_klass = rax_argslot;
441 443 __ load_klass(rax_klass, rcx_recv);
442 444 __ verify_oop(rax_klass);
443 445
444 446 // get target methodOop & entry point
445 447 const int base = instanceKlass::vtable_start_offset() * wordSize;
446 448 assert(vtableEntry::size() * wordSize == wordSize, "adjust the scaling in the code below");
447 449 Address vtable_entry_addr(rax_klass,
448 450 rbx_index, Address::times_ptr,
449 451 base + vtableEntry::method_offset_in_bytes());
450 452 Register rbx_method = rbx_temp;
451 453 __ movptr(rbx_method, vtable_entry_addr);
452 454
453 455 __ verify_oop(rbx_method);
454 456 __ jmp(rbx_method_fie);
455 457 }
456 458 break;
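Note: with the vtable index in hand, virtual dispatch above is one scaled load: the target methodOop sits at receiver_klass + vtable_start + index * wordSize (+ the per-entry method offset). A hedged C++ sketch with the layout constants as parameters (the stub bakes in HotSpot's actual values):

    struct Method;                       // opaque stand-in for methodOop
    static Method* vtable_lookup(char* klass, int index,
                                 int vtable_start_bytes, int method_offset_bytes) {
      char* entry = klass + vtable_start_bytes + index * (int)sizeof(void*);
      return *reinterpret_cast<Method**>(entry + method_offset_bytes);
    }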
457 459
458 460 case _invokeinterface_mh:
459 461 {
460 462 // same as TemplateTable::invokeinterface,
461 463 // minus the CP setup and profiling:
462 464
463 465 // pick out the interface and itable index from the MH.
464 466 __ load_method_handle_vmslots(rax_argslot, rcx_recv, rdx_temp);
465 467 Register rdx_intf = rdx_temp;
466 468 Register rbx_index = rbx_temp;
467 469 __ movptr(rdx_intf, rcx_mh_vmtarget);
468 470 __ movl(rbx_index, rcx_dmh_vmindex);
469 471 __ movptr(rcx_recv, __ argument_address(rax_argslot, -1));
470 472 __ null_check(rcx_recv, oopDesc::klass_offset_in_bytes());
471 473
472 474 // get receiver klass
473 475 Register rax_klass = rax_argslot;
474 476 __ load_klass(rax_klass, rcx_recv);
475 477 __ verify_oop(rax_klass);
476 478
477 479 Register rdi_temp = rdi;
478 480 Register rbx_method = rbx_index;
479 481
480 482 // get interface klass
481 483 Label no_such_interface;
482 484 __ verify_oop(rdx_intf);
483 485 __ lookup_interface_method(rax_klass, rdx_intf,
484 486 // note: next two args must be the same:
485 487 rbx_index, rbx_method,
486 488 rdi_temp,
487 489 no_such_interface);
488 490
489 491 __ verify_oop(rbx_method);
490 492 __ jmp(rbx_method_fie);
491 493 __ hlt();
492 494
493 495 __ bind(no_such_interface);
494 496 // Throw an exception.
495 497 // For historical reasons, it will be IncompatibleClassChangeError.
496 498 __ pushptr(Address(rdx_intf, java_mirror_offset)); // required interface
497 499 __ push(rcx_recv); // bad receiver
498 500 __ push((int)Bytecodes::_invokeinterface); // who is complaining?
499 501 __ jump(ExternalAddress(from_interpreted_entry(_raise_exception)));
500 502 }
501 503 break;
502 504
503 505 case _bound_ref_mh:
504 506 case _bound_int_mh:
505 507 case _bound_long_mh:
506 508 case _bound_ref_direct_mh:
507 509 case _bound_int_direct_mh:
508 510 case _bound_long_direct_mh:
509 511 {
510 512 bool direct_to_method = (ek >= _bound_ref_direct_mh);
511 513 BasicType arg_type = T_ILLEGAL;
512 514 if (ek == _bound_long_mh || ek == _bound_long_direct_mh) {
513 515 arg_type = T_LONG;
514 516 } else if (ek == _bound_int_mh || ek == _bound_int_direct_mh) {
515 517 arg_type = T_INT;
516 518 } else {
517 519 assert(ek == _bound_ref_mh || ek == _bound_ref_direct_mh, "must be ref");
518 520 arg_type = T_OBJECT;
519 521 }
520 522 int arg_slots = type2size[arg_type];
521 523 int arg_mask = (arg_type == T_OBJECT ? _INSERT_REF_MASK :
522 524 arg_slots == 1 ? _INSERT_INT_MASK : _INSERT_LONG_MASK);
523 525
524 526 // make room for the new argument:
525 527 __ movl(rax_argslot, rcx_bmh_vmargslot);
526 528 __ lea(rax_argslot, __ argument_address(rax_argslot));
527 529 insert_arg_slots(_masm, arg_slots * stack_move_unit(), arg_mask,
528 530 rax_argslot, rbx_temp, rdx_temp);
529 531
530 532 // store bound argument into the new stack slot:
531 533 __ movptr(rbx_temp, rcx_bmh_argument);
532 534 Address prim_value_addr(rbx_temp, java_lang_boxing_object::value_offset_in_bytes(arg_type));
533 535 if (arg_type == T_OBJECT) {
534 536 __ movptr(Address(rax_argslot, 0), rbx_temp);
535 537 } else {
536 538 __ load_sized_value(rbx_temp, prim_value_addr,
537 539 type2aelembytes(arg_type), is_signed_subword_type(arg_type));
538 540 __ movptr(Address(rax_argslot, 0), rbx_temp);
539 541 #ifndef _LP64
540 542 if (arg_slots == 2) {
541 543 __ movl(rbx_temp, prim_value_addr.plus_disp(wordSize));
542 544 __ movl(Address(rax_argslot, Interpreter::stackElementSize()), rbx_temp);
543 545 }
544 546 #endif //_LP64
545 547       }
546 548
547 549
548 550 if (direct_to_method) {
549 551 Register rbx_method = rbx_temp;
550 552 __ movptr(rbx_method, rcx_mh_vmtarget);
551 553 __ verify_oop(rbx_method);
552 554 __ jmp(rbx_method_fie);
553 555 } else {
554 556 __ movptr(rcx_recv, rcx_mh_vmtarget);
555 557 __ verify_oop(rcx_recv);
556 558 __ jump_to_method_handle_entry(rcx_recv, rdx_temp);
557 559 }
558 560 }
559 561 break;
560 562
561 563 case _adapter_retype_only:
562 564 case _adapter_retype_raw:
563 565 // immediately jump to the next MH layer:
564 566 __ movptr(rcx_recv, rcx_mh_vmtarget);
565 567 __ verify_oop(rcx_recv);
566 568 __ jump_to_method_handle_entry(rcx_recv, rdx_temp);
567 569 // This is OK when all parameter types widen.
568 570 // It is also OK when a return type narrows.
569 571 break;
570 572
571 573 case _adapter_check_cast:
572 574 {
573 575 // temps:
574 576 Register rbx_klass = rbx_temp; // interesting AMH data
575 577
576 578 // check a reference argument before jumping to the next layer of MH:
577 579 __ movl(rax_argslot, rcx_amh_vmargslot);
578 580 vmarg = __ argument_address(rax_argslot);
579 581
580 582 // What class are we casting to?
581 583 __ movptr(rbx_klass, rcx_amh_argument); // this is a Class object!
582 584 __ movptr(rbx_klass, Address(rbx_klass, java_lang_Class::klass_offset_in_bytes()));
583 585
584 586 Label done;
585 587 __ movptr(rdx_temp, vmarg);
586 588 __ testl(rdx_temp, rdx_temp);
587 589 __ jcc(Assembler::zero, done); // no cast if null
588 590 __ load_klass(rdx_temp, rdx_temp);
589 591
590 592 // live at this point:
591 593 // - rbx_klass: klass required by the target method
592 594 // - rdx_temp: argument klass to test
593 595 // - rcx_recv: adapter method handle
594 596 __ check_klass_subtype(rdx_temp, rbx_klass, rax_argslot, done);
595 597
596 598 // If we get here, the type check failed!
597 599 // Call the wrong_method_type stub, passing the failing argument type in rax.
598 600 Register rax_mtype = rax_argslot;
599 601 __ movl(rax_argslot, rcx_amh_vmargslot); // reload argslot field
600 602 __ movptr(rdx_temp, vmarg);
601 603
602 604 __ pushptr(rcx_amh_argument); // required class
603 605 __ push(rdx_temp); // bad object
604 606 __ push((int)Bytecodes::_checkcast); // who is complaining?
605 607 __ jump(ExternalAddress(from_interpreted_entry(_raise_exception)));
606 608
607 609 __ bind(done);
608 610 // get the new MH:
609 611 __ movptr(rcx_recv, rcx_mh_vmtarget);
610 612 __ jump_to_method_handle_entry(rcx_recv, rdx_temp);
611 613 }
612 614 break;
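Note: the cast adapter accepts null unconditionally and otherwise requires the argument's klass to be a subtype of the required klass, falling into the shared _raise_exception path on failure. A minimal sketch of that decision (hypothetical single-inheritance chain only; the real check_klass_subtype also consults secondary-supers caches):

    #include <cstddef>
    struct K { const K* super; };        // hypothetical klass with one parent
    static bool is_subtype_of(const K* k, const K* required) {
      for (; k != NULL; k = k->super)
        if (k == required) return true;
      return false;
    }
    static bool checkcast_ok(const K* arg_klass, const K* required) {
      return arg_klass == NULL || is_subtype_of(arg_klass, required);
    }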
613 615
614 616 case _adapter_prim_to_prim:
615 617 case _adapter_ref_to_prim:
616 618 // handled completely by optimized cases
617 619 __ stop("init_AdapterMethodHandle should not issue this");
618 620 break;
619 621
620 622 case _adapter_opt_i2i: // optimized subcase of adapt_prim_to_prim
621 623 //case _adapter_opt_f2i: // optimized subcase of adapt_prim_to_prim
622 624 case _adapter_opt_l2i: // optimized subcase of adapt_prim_to_prim
623 625 case _adapter_opt_unboxi: // optimized subcase of adapt_ref_to_prim
624 626 {
625 627 // perform an in-place conversion to int or an int subword
626 628 __ movl(rax_argslot, rcx_amh_vmargslot);
627 629 vmarg = __ argument_address(rax_argslot);
628 630
629 631 switch (ek) {
630 632 case _adapter_opt_i2i:
631 633 __ movl(rdx_temp, vmarg);
632 634 break;
633 635 case _adapter_opt_l2i:
634 636 {
635 637 // just delete the extra slot; on a little-endian machine we keep the first
636 638 __ lea(rax_argslot, __ argument_address(rax_argslot, 1));
637 639 remove_arg_slots(_masm, -stack_move_unit(),
638 640 rax_argslot, rbx_temp, rdx_temp);
639 641 vmarg = Address(rax_argslot, -Interpreter::stackElementSize());
640 642 __ movl(rdx_temp, vmarg);
641 643 }
642 644 break;
643 645 case _adapter_opt_unboxi:
644 646 {
645 647 // Load the value up from the heap.
646 648 __ movptr(rdx_temp, vmarg);
647 649 int value_offset = java_lang_boxing_object::value_offset_in_bytes(T_INT);
648 650 #ifdef ASSERT
649 651 for (int bt = T_BOOLEAN; bt < T_INT; bt++) {
650 652 if (is_subword_type(BasicType(bt)))
651 653 assert(value_offset == java_lang_boxing_object::value_offset_in_bytes(BasicType(bt)), "");
652 654 }
653 655 #endif
654 656 __ null_check(rdx_temp, value_offset);
655 657 __ movl(rdx_temp, Address(rdx_temp, value_offset));
656 658 // We load this as a word. Because we are little-endian,
657 659 // the low bits will be correct, but the high bits may need cleaning.
658 660 // The vminfo will guide us to clean those bits.
659 661 }
660 662 break;
661 663 default:
662 664 assert(false, "");
663 665 }
664 666 goto finish_int_conversion;
665 667 }
666 668
667 669 finish_int_conversion:
668 670 {
669 671 Register rbx_vminfo = rbx_temp;
670 672 __ movl(rbx_vminfo, rcx_amh_conversion);
671 673 assert(CONV_VMINFO_SHIFT == 0, "preshifted");
672 674
673 675 // get the new MH:
674 676 __ movptr(rcx_recv, rcx_mh_vmtarget);
675 677 // (now we are done with the old MH)
676 678
677 679 // original 32-bit vmdata word must be of this form:
678 680 // | MBZ:16 | signBitCount:8 | srcDstTypes:8 | conversionOp:8 |
679 681 __ xchgl(rcx, rbx_vminfo); // free rcx for shifts
680 682 __ shll(rdx_temp /*, rcx*/);
681 683 Label zero_extend, done;
682 684 __ testl(rcx, CONV_VMINFO_SIGN_FLAG);
683 685 __ jcc(Assembler::zero, zero_extend);
684 686
685 687 // this path is taken for int->byte, int->short
686 688 __ sarl(rdx_temp /*, rcx*/);
687 689 __ jmp(done);
688 690
689 691 __ bind(zero_extend);
690 692 // this is taken for int->char
691 693 __ shrl(rdx_temp /*, rcx*/);
692 694
693 695 __ bind(done);
694 696 __ movptr(vmarg, rdx_temp);
695 697 __ xchgl(rcx, rbx_vminfo); // restore rcx_recv
696 698
697 699 __ jump_to_method_handle_entry(rcx_recv, rdx_temp);
698 700 }
699 701 break;
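Note: the shll/sarl (or shrl) pair above is the classic shift-based narrowing idiom: shifting left by 32-n bits and back right re-extends the low n bits, arithmetically for signed targets (int->byte, int->short) and logically for int->char. A C++ sketch with the vminfo encoding simplified to a plain bit count:

    #include <cstdint>
    // Truncate x to its low 'bits' bits and re-extend, matching the
    // sign-flag branch above.
    static int32_t narrow_and_extend(int32_t x, int bits, bool is_signed) {
      const int s = 32 - bits;
      uint32_t u = (uint32_t)x << s;              // shll
      if (is_signed)
        return (int32_t)u >> s;                   // sarl (arithmetic >> assumed)
      return (int32_t)(u >> s);                   // shrl: zero-extend
    }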
700 702
701 703 case _adapter_opt_i2l: // optimized subcase of adapt_prim_to_prim
702 704 case _adapter_opt_unboxl: // optimized subcase of adapt_ref_to_prim
703 705 {
704 706 // perform an in-place int-to-long or ref-to-long conversion
705 707 __ movl(rax_argslot, rcx_amh_vmargslot);
706 708
707 709 // on a little-endian machine we keep the first slot and add another after
708 710 __ lea(rax_argslot, __ argument_address(rax_argslot, 1));
709 711 insert_arg_slots(_masm, stack_move_unit(), _INSERT_INT_MASK,
710 712 rax_argslot, rbx_temp, rdx_temp);
711 713 Address vmarg1(rax_argslot, -Interpreter::stackElementSize());
712 714 Address vmarg2 = vmarg1.plus_disp(Interpreter::stackElementSize());
713 715
714 716 switch (ek) {
715 717 case _adapter_opt_i2l:
716 718 {
717 719 __ movl(rdx_temp, vmarg1);
718 720 __ sarl(rdx_temp, 31); // __ extend_sign()
719 721 __ movl(vmarg2, rdx_temp); // store second word
720 722 }
721 723 break;
722 724 case _adapter_opt_unboxl:
723 725 {
724 726 // Load the value up from the heap.
725 727 __ movptr(rdx_temp, vmarg1);
726 728 int value_offset = java_lang_boxing_object::value_offset_in_bytes(T_LONG);
727 729 assert(value_offset == java_lang_boxing_object::value_offset_in_bytes(T_DOUBLE), "");
728 730 __ null_check(rdx_temp, value_offset);
729 731 __ movl(rbx_temp, Address(rdx_temp, value_offset + 0*BytesPerInt));
730 732 __ movl(rdx_temp, Address(rdx_temp, value_offset + 1*BytesPerInt));
731 733 __ movl(vmarg1, rbx_temp);
732 734 __ movl(vmarg2, rdx_temp);
733 735 }
734 736 break;
735 737 default:
736 738 assert(false, "");
737 739 }
738 740
739 741 __ movptr(rcx_recv, rcx_mh_vmtarget);
740 742 __ jump_to_method_handle_entry(rcx_recv, rdx_temp);
741 743 }
742 744 break;
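Note: on a 32-bit stack, i2l only needs to materialize the sign word: arithmetic-shifting the low word right by 31 yields all-zeros or all-ones for the high half. Sketch:

    #include <cstdint>
    // Produce the two 32-bit stack words of an int->long conversion,
    // as the sarl(rdx_temp, 31) above does.
    static void i2l_words(int32_t v, int32_t& lo, int32_t& hi) {
      lo = v;
      hi = v >> 31;   // arithmetic shift assumed: the sign-extension word
    }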
743 745
744 746 case _adapter_opt_f2d: // optimized subcase of adapt_prim_to_prim
745 747 case _adapter_opt_d2f: // optimized subcase of adapt_prim_to_prim
746 748 {
747 749 // perform an in-place floating primitive conversion
748 750 __ movl(rax_argslot, rcx_amh_vmargslot);
749 751 __ lea(rax_argslot, __ argument_address(rax_argslot, 1));
750 752 if (ek == _adapter_opt_f2d) {
751 753 insert_arg_slots(_masm, stack_move_unit(), _INSERT_INT_MASK,
752 754 rax_argslot, rbx_temp, rdx_temp);
753 755 }
754 756 Address vmarg(rax_argslot, -Interpreter::stackElementSize());
755 757
756 758 #ifdef _LP64
757 759 if (ek == _adapter_opt_f2d) {
758 760 __ movflt(xmm0, vmarg);
759 761 __ cvtss2sd(xmm0, xmm0);
760 762 __ movdbl(vmarg, xmm0);
761 763 } else {
762 764 __ movdbl(xmm0, vmarg);
763 765 __ cvtsd2ss(xmm0, xmm0);
764 766 __ movflt(vmarg, xmm0);
765 767 }
766 768 #else //_LP64
767 769 if (ek == _adapter_opt_f2d) {
768 770 __ fld_s(vmarg); // load float to ST0
769 771 __ fstp_s(vmarg); // store single
770 772 } else if (!TaggedStackInterpreter) {
771 773 __ fld_d(vmarg); // load double to ST0
772 774 __ fstp_s(vmarg); // store single
773 775 } else {
774 776 Address vmarg_tag = vmarg.plus_disp(tag_offset);
775 777 Address vmarg2 = vmarg.plus_disp(Interpreter::stackElementSize());
776 778 // vmarg2_tag does not participate in this code
777 779 Register rbx_tag = rbx_temp;
778 780 __ movl(rbx_tag, vmarg_tag); // preserve tag
779 781 __ movl(rdx_temp, vmarg2); // get second word of double
780 782 __ movl(vmarg_tag, rdx_temp); // align with first word
781 783 __ fld_d(vmarg); // load double to ST0
782 784 __ movl(vmarg_tag, rbx_tag); // restore tag
783 785 __ fstp_s(vmarg); // store single
784 786 }
785 787 #endif //_LP64
786 788
787 789 if (ek == _adapter_opt_d2f) {
788 790 remove_arg_slots(_masm, -stack_move_unit(),
789 791 rax_argslot, rbx_temp, rdx_temp);
790 792 }
791 793
792 794 __ movptr(rcx_recv, rcx_mh_vmtarget);
793 795 __ jump_to_method_handle_entry(rcx_recv, rdx_temp);
794 796 }
795 797 break;
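Note: on LP64 the in-place widening is just movflt/cvtss2sd/movdbl, while the 32-bit x87 path has to work around the TaggedStackInterpreter tag word. A C++ equivalent of the in-place f2d (bit-level slot access via memcpy; the second slot was opened by insert_arg_slots above):

    #include <cstring>
    // Widen the float stored in a stack slot to a double occupying two slots.
    static void f2d_in_place(void* slot) {
      float f;
      std::memcpy(&f, slot, sizeof f);
      double d = (double)f;
      std::memcpy(slot, &d, sizeof d);
    }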
796 798
797 799 case _adapter_prim_to_ref:
798 800 __ unimplemented(entry_name(ek)); // %%% FIXME: NYI
799 801 break;
800 802
801 803 case _adapter_swap_args:
802 804 case _adapter_rot_args:
803 805 // handled completely by optimized cases
804 806 __ stop("init_AdapterMethodHandle should not issue this");
805 807 break;
806 808
807 809 case _adapter_opt_swap_1:
808 810 case _adapter_opt_swap_2:
809 811 case _adapter_opt_rot_1_up:
810 812 case _adapter_opt_rot_1_down:
811 813 case _adapter_opt_rot_2_up:
812 814 case _adapter_opt_rot_2_down:
813 815 {
814 816 int rotate = 0, swap_slots = 0;
815 817 switch ((int)ek) {
816 818 case _adapter_opt_swap_1: swap_slots = 1; break;
817 819 case _adapter_opt_swap_2: swap_slots = 2; break;
818 820 case _adapter_opt_rot_1_up: swap_slots = 1; rotate++; break;
819 821 case _adapter_opt_rot_1_down: swap_slots = 1; rotate--; break;
820 822 case _adapter_opt_rot_2_up: swap_slots = 2; rotate++; break;
821 823 case _adapter_opt_rot_2_down: swap_slots = 2; rotate--; break;
822 824 default: assert(false, "");
823 825 }
824 826
825 827 // the real size of the move must be doubled if TaggedStackInterpreter:
826 828 int swap_bytes = (int)( swap_slots * Interpreter::stackElementWords() * wordSize );
827 829
828 830 // 'argslot' is the position of the first argument to swap
829 831 __ movl(rax_argslot, rcx_amh_vmargslot);
830 832 __ lea(rax_argslot, __ argument_address(rax_argslot));
831 833
832 834 // 'vminfo' is the second
833 835 Register rbx_destslot = rbx_temp;
834 836 __ movl(rbx_destslot, rcx_amh_conversion);
835 837 assert(CONV_VMINFO_SHIFT == 0, "preshifted");
836 838 __ andl(rbx_destslot, CONV_VMINFO_MASK);
837 839 __ lea(rbx_destslot, __ argument_address(rbx_destslot));
838 840 DEBUG_ONLY(verify_argslot(_masm, rbx_destslot, "swap point must fall within current frame"));
839 841
840 842 if (!rotate) {
841 843 for (int i = 0; i < swap_bytes; i += wordSize) {
842 844 __ movptr(rdx_temp, Address(rax_argslot , i));
843 845 __ push(rdx_temp);
844 846 __ movptr(rdx_temp, Address(rbx_destslot, i));
845 847 __ movptr(Address(rax_argslot, i), rdx_temp);
846 848 __ pop(rdx_temp);
847 849 __ movptr(Address(rbx_destslot, i), rdx_temp);
848 850 }
849 851 } else {
850 852 // push the first chunk, which is going to get overwritten
851 853 for (int i = swap_bytes; (i -= wordSize) >= 0; ) {
852 854 __ movptr(rdx_temp, Address(rax_argslot, i));
853 855 __ push(rdx_temp);
854 856 }
855 857
856 858 if (rotate > 0) {
857 859 // rotate upward
858 860 __ subptr(rax_argslot, swap_bytes);
859 861 #ifdef ASSERT
860 862 {
861 863 // Verify that argslot > destslot, by at least swap_bytes.
862 864 Label L_ok;
863 865 __ cmpptr(rax_argslot, rbx_destslot);
864 866 __ jcc(Assembler::aboveEqual, L_ok);
865 867 __ stop("source must be above destination (upward rotation)");
866 868 __ bind(L_ok);
867 869 }
868 870 #endif
869 871 // work argslot down to destslot, copying contiguous data upwards
870 872 // pseudo-code:
871 873 // rax = src_addr - swap_bytes
872 874 // rbx = dest_addr
873 875 // while (rax >= rbx) *(rax + swap_bytes) = *(rax + 0), rax--;
874 876 Label loop;
875 877 __ bind(loop);
876 878 __ movptr(rdx_temp, Address(rax_argslot, 0));
877 879 __ movptr(Address(rax_argslot, swap_bytes), rdx_temp);
878 880 __ addptr(rax_argslot, -wordSize);
879 881 __ cmpptr(rax_argslot, rbx_destslot);
880 882 __ jcc(Assembler::aboveEqual, loop);
881 883 } else {
882 884 __ addptr(rax_argslot, swap_bytes);
883 885 #ifdef ASSERT
884 886 {
885 887 // Verify that argslot < destslot, by at least swap_bytes.
886 888 Label L_ok;
887 889 __ cmpptr(rax_argslot, rbx_destslot);
888 890 __ jcc(Assembler::belowEqual, L_ok);
889 891 __ stop("source must be below destination (downward rotation)");
890 892 __ bind(L_ok);
891 893 }
892 894 #endif
893 895 // work argslot up to destslot, copying contiguous data downwards
894 896 // pseudo-code:
895 897 // rax = src_addr + swap_bytes
896 898 // rbx = dest_addr
897 899 // while (rax <= rbx) *(rax - swap_bytes) = *(rax + 0), rax++;
898 900 Label loop;
899 901 __ bind(loop);
900 902 __ movptr(rdx_temp, Address(rax_argslot, 0));
901 903 __ movptr(Address(rax_argslot, -swap_bytes), rdx_temp);
902 904 __ addptr(rax_argslot, wordSize);
903 905 __ cmpptr(rax_argslot, rbx_destslot);
904 906 __ jcc(Assembler::belowEqual, loop);
905 907 }
906 908
907 909 // pop the original first chunk into the destination slot, now free
908 910 for (int i = 0; i < swap_bytes; i += wordSize) {
909 911 __ pop(rdx_temp);
910 912 __ movptr(Address(rbx_destslot, i), rdx_temp);
911 913 }
912 914 }
913 915
914 916 __ movptr(rcx_recv, rcx_mh_vmtarget);
915 917 __ jump_to_method_handle_entry(rcx_recv, rdx_temp);
916 918 }
917 919 break;
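Note: the rotate path above is save-shift-restore: the displaced chunk is parked with pushes, the intervening words are block-moved toward the source, and the chunk is popped into the freed destination slots. The net permutation is exactly a rotation; for the upward case:

    #include <algorithm>
    #include <cstdint>
    // dst < src on a downward-growing stack: move the 'chunk' words at
    // [src, src+chunk) to dst, sliding [dst, src) up by 'chunk' words.
    // Same net effect as the push/copy/pop loops above.
    static void rotate_up(intptr_t* dst, intptr_t* src, int chunk) {
      std::rotate(dst, src, src + chunk);
    }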
918 920
919 921 case _adapter_dup_args:
920 922 {
921 923 // 'argslot' is the position of the first argument to duplicate
922 924 __ movl(rax_argslot, rcx_amh_vmargslot);
923 925 __ lea(rax_argslot, __ argument_address(rax_argslot));
924 926
925 927 // 'stack_move' is negative number of words to duplicate
926 928 Register rdx_stack_move = rdx_temp;
927 929 __ movl(rdx_stack_move, rcx_amh_conversion);
928 930 __ sarl(rdx_stack_move, CONV_STACK_MOVE_SHIFT);
929 931
930 932 int argslot0_num = 0;
931 933 Address argslot0 = __ argument_address(RegisterOrConstant(argslot0_num));
932 934 assert(argslot0.base() == rsp, "");
933 935 int pre_arg_size = argslot0.disp();
934 936 assert(pre_arg_size % wordSize == 0, "");
935 937 assert(pre_arg_size > 0, "must include PC");
936 938
937 939 // remember the old rsp+1 (argslot[0])
938 940 Register rbx_oldarg = rbx_temp;
939 941 __ lea(rbx_oldarg, argslot0);
940 942
941 943 // move rsp down to make room for dups
942 944 __ lea(rsp, Address(rsp, rdx_stack_move, Address::times_ptr));
943 945
944 946 // compute the new rsp+1 (argslot[0])
945 947 Register rdx_newarg = rdx_temp;
946 948 __ lea(rdx_newarg, argslot0);
947 949
948 950 __ push(rdi); // need a temp
949 951 // (preceding push must be done after arg addresses are taken!)
950 952
951 953 // pull down the pre_arg_size data (PC)
952 954 for (int i = -pre_arg_size; i < 0; i += wordSize) {
953 955 __ movptr(rdi, Address(rbx_oldarg, i));
954 956 __ movptr(Address(rdx_newarg, i), rdi);
955 957 }
956 958
957 959 // copy from rax_argslot[0...] down to new_rsp[1...]
958 960 // pseudo-code:
959 961 // rbx = old_rsp+1
960 962 // rdx = new_rsp+1
961 963 // rax = argslot
962 964 // while (rdx < rbx) *rdx++ = *rax++
963 965 Label loop;
964 966 __ bind(loop);
965 967 __ movptr(rdi, Address(rax_argslot, 0));
966 968 __ movptr(Address(rdx_newarg, 0), rdi);
967 969 __ addptr(rax_argslot, wordSize);
968 970 __ addptr(rdx_newarg, wordSize);
969 971 __ cmpptr(rdx_newarg, rbx_oldarg);
970 972 __ jcc(Assembler::less, loop);
971 973
972 974 __ pop(rdi); // restore temp
973 975
974 976 __ movptr(rcx_recv, rcx_mh_vmtarget);
975 977 __ jump_to_method_handle_entry(rcx_recv, rdx_temp);
976 978 }
977 979 break;
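Note: a sketch of the net effect of _adapter_dup_args on the argument list (a std::vector standing in for the interpreter stack; the stub additionally grows the stack downward and recopies the stacked PC words that sit below argslot[0]):

    #include <cstdint>
    #include <vector>
    // Re-push 'count' existing slots starting at 'pos' (net effect only).
    static void dup_args(std::vector<intptr_t>& args, std::size_t pos, std::size_t count) {
      std::vector<intptr_t> span(args.begin() + pos, args.begin() + pos + count);
      args.insert(args.end(), span.begin(), span.end());
    }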
978 980
979 981 case _adapter_drop_args:
980 982 {
981 983 // 'argslot' is the position of the first argument to nuke
982 984 __ movl(rax_argslot, rcx_amh_vmargslot);
983 985 __ lea(rax_argslot, __ argument_address(rax_argslot));
984 986
985 987 __ push(rdi); // need a temp
986 988 // (must do previous push after argslot address is taken)
987 989
988 990 // 'stack_move' is number of words to drop
989 991 Register rdi_stack_move = rdi;
990 992 __ movl(rdi_stack_move, rcx_amh_conversion);
991 993 __ sarl(rdi_stack_move, CONV_STACK_MOVE_SHIFT);
992 994 remove_arg_slots(_masm, rdi_stack_move,
993 995 rax_argslot, rbx_temp, rdx_temp);
994 996
995 997 __ pop(rdi); // restore temp
996 998
997 999 __ movptr(rcx_recv, rcx_mh_vmtarget);
998 1000 __ jump_to_method_handle_entry(rcx_recv, rdx_temp);
999 1001 }
1000 1002 break;
1001 1003
1002 1004 case _adapter_collect_args:
1003 1005 __ unimplemented(entry_name(ek)); // %%% FIXME: NYI
1004 1006 break;
1005 1007
1006 1008 case _adapter_spread_args:
1007 1009 // handled completely by optimized cases
1008 1010 __ stop("init_AdapterMethodHandle should not issue this");
1009 1011 break;
1010 1012
1011 1013 case _adapter_opt_spread_0:
1012 1014 case _adapter_opt_spread_1:
1013 1015 case _adapter_opt_spread_more:
1014 1016 {
1015 1017 // spread an array out into a group of arguments
1016 1018 int length_constant = -1;
1017 1019 switch (ek) {
1018 1020 case _adapter_opt_spread_0: length_constant = 0; break;
1019 1021 case _adapter_opt_spread_1: length_constant = 1; break;
1020 1022 }
1021 1023
1022 1024 // find the address of the array argument
1023 1025 __ movl(rax_argslot, rcx_amh_vmargslot);
1024 1026 __ lea(rax_argslot, __ argument_address(rax_argslot));
1025 1027
1026 1028 // grab some temps
1027 1029 { __ push(rsi); __ push(rdi); }
1028 1030 // (preceding pushes must be done after argslot address is taken!)
1029 1031 #define UNPUSH_RSI_RDI \
1030 1032 { __ pop(rdi); __ pop(rsi); }
1031 1033
1032 1034     // rax_argslot points both to the array and to the first output arg
1033 1035 vmarg = Address(rax_argslot, 0);
1034 1036
1035 1037 // Get the array value.
1036 1038 Register rsi_array = rsi;
1037 1039 Register rdx_array_klass = rdx_temp;
1038 1040 BasicType elem_type = T_OBJECT;
1039 1041 int length_offset = arrayOopDesc::length_offset_in_bytes();
1040 1042 int elem0_offset = arrayOopDesc::base_offset_in_bytes(elem_type);
1041 1043 __ movptr(rsi_array, vmarg);
1042 1044 Label skip_array_check;
1043 1045 if (length_constant == 0) {
1044 1046 __ testptr(rsi_array, rsi_array);
1045 1047 __ jcc(Assembler::zero, skip_array_check);
1046 1048 }
1047 1049 __ null_check(rsi_array, oopDesc::klass_offset_in_bytes());
1048 1050 __ load_klass(rdx_array_klass, rsi_array);
1049 1051
1050 1052 // Check the array type.
1051 1053 Register rbx_klass = rbx_temp;
1052 1054 __ movptr(rbx_klass, rcx_amh_argument); // this is a Class object!
1053 1055 __ movptr(rbx_klass, Address(rbx_klass, java_lang_Class::klass_offset_in_bytes()));
1054 1056
1055 1057 Label ok_array_klass, bad_array_klass, bad_array_length;
1056 1058 __ check_klass_subtype(rdx_array_klass, rbx_klass, rdi, ok_array_klass);
1057 1059 // If we get here, the type check failed!
1058 1060 __ jmp(bad_array_klass);
1059 1061 __ bind(ok_array_klass);
1060 1062
1061 1063 // Check length.
1062 1064 if (length_constant >= 0) {
1063 1065 __ cmpl(Address(rsi_array, length_offset), length_constant);
1064 1066 } else {
1065 1067 Register rbx_vminfo = rbx_temp;
1066 1068 __ movl(rbx_vminfo, rcx_amh_conversion);
1067 1069 assert(CONV_VMINFO_SHIFT == 0, "preshifted");
1068 1070 __ andl(rbx_vminfo, CONV_VMINFO_MASK);
1069 1071 __ cmpl(rbx_vminfo, Address(rsi_array, length_offset));
1070 1072 }
1071 1073 __ jcc(Assembler::notEqual, bad_array_length);
1072 1074
1073 1075 Register rdx_argslot_limit = rdx_temp;
1074 1076
1075 1077 // Array length checks out. Now insert any required stack slots.
1076 1078 if (length_constant == -1) {
1077 1079 // Form a pointer to the end of the affected region.
1078 1080 __ lea(rdx_argslot_limit, Address(rax_argslot, Interpreter::stackElementSize()));
1079 1081 // 'stack_move' is negative number of words to insert
1080 1082 Register rdi_stack_move = rdi;
1081 1083 __ movl(rdi_stack_move, rcx_amh_conversion);
1082 1084 __ sarl(rdi_stack_move, CONV_STACK_MOVE_SHIFT);
1083 1085 Register rsi_temp = rsi_array; // spill this
1084 1086 insert_arg_slots(_masm, rdi_stack_move, -1,
1085 1087 rax_argslot, rbx_temp, rsi_temp);
1086 1088 // reload the array (since rsi was killed)
1087 1089 __ movptr(rsi_array, vmarg);
1088 1090 } else if (length_constant > 1) {
1089 1091 int arg_mask = 0;
1090 1092 int new_slots = (length_constant - 1);
1091 1093 for (int i = 0; i < new_slots; i++) {
1092 1094 arg_mask <<= 1;
1093 1095 arg_mask |= _INSERT_REF_MASK;
1094 1096 }
1095 1097 insert_arg_slots(_masm, new_slots * stack_move_unit(), arg_mask,
1096 1098 rax_argslot, rbx_temp, rdx_temp);
1097 1099 } else if (length_constant == 1) {
1098 1100 // no stack resizing required
1099 1101 } else if (length_constant == 0) {
1100 1102 remove_arg_slots(_masm, -stack_move_unit(),
1101 1103 rax_argslot, rbx_temp, rdx_temp);
1102 1104 }
1103 1105
1104 1106 // Copy from the array to the new slots.
1105 1107 // Note: Stack change code preserves integrity of rax_argslot pointer.
1106 1108 // So even after slot insertions, rax_argslot still points to first argument.
1107 1109 if (length_constant == -1) {
1108 1110 // [rax_argslot, rdx_argslot_limit) is the area we are inserting into.
1109 1111 Register rsi_source = rsi_array;
1110 1112 __ lea(rsi_source, Address(rsi_array, elem0_offset));
1111 1113 Label loop;
1112 1114 __ bind(loop);
1113 1115 __ movptr(rbx_temp, Address(rsi_source, 0));
1114 1116 __ movptr(Address(rax_argslot, 0), rbx_temp);
1115 1117 __ addptr(rsi_source, type2aelembytes(elem_type));
1116 1118 if (TaggedStackInterpreter) {
1117 1119 __ movptr(Address(rax_argslot, tag_offset),
1118 1120 frame::tag_for_basic_type(elem_type));
1119 1121 }
1120 1122 __ addptr(rax_argslot, Interpreter::stackElementSize());
1121 1123 __ cmpptr(rax_argslot, rdx_argslot_limit);
1122 1124 __ jcc(Assembler::less, loop);
1123 1125 } else if (length_constant == 0) {
1124 1126 __ bind(skip_array_check);
1125 1127 // nothing to copy
1126 1128 } else {
1127 1129 int elem_offset = elem0_offset;
1128 1130 int slot_offset = 0;
1129 1131 for (int index = 0; index < length_constant; index++) {
1130 1132 __ movptr(rbx_temp, Address(rsi_array, elem_offset));
1131 1133 __ movptr(Address(rax_argslot, slot_offset), rbx_temp);
1132 1134 elem_offset += type2aelembytes(elem_type);
1133 1135 if (TaggedStackInterpreter) {
1134 1136 __ movptr(Address(rax_argslot, slot_offset + tag_offset),
1135 1137 frame::tag_for_basic_type(elem_type));
1136 1138 }
1137 1139 slot_offset += Interpreter::stackElementSize();
1138 1140 }
1139 1141 }
1140 1142
1141 1143 // Arguments are spread. Move to next method handle.
1142 1144 UNPUSH_RSI_RDI;
1143 1145 __ movptr(rcx_recv, rcx_mh_vmtarget);
1144 1146 __ jump_to_method_handle_entry(rcx_recv, rdx_temp);
1145 1147
1146 1148 __ bind(bad_array_klass);
1147 1149 UNPUSH_RSI_RDI;
1148 1150 __ pushptr(Address(rdx_array_klass, java_mirror_offset)); // required type
1149 1151 __ pushptr(vmarg); // bad array
1150 1152 __ push((int)Bytecodes::_aaload); // who is complaining?
1151 1153 __ jump(ExternalAddress(from_interpreted_entry(_raise_exception)));
1152 1154
1153 1155 __ bind(bad_array_length);
1154 1156 UNPUSH_RSI_RDI;
1155 1157 __ push(rcx_recv); // AMH requiring a certain length
1156 1158 __ pushptr(vmarg); // bad array
1157 1159 __ push((int)Bytecodes::_arraylength); // who is complaining?
1158 1160 __ jump(ExternalAddress(from_interpreted_entry(_raise_exception)));
1159 1161
1160 1162 #undef UNPUSH_RSI_RDI
1161 1163 }
1162 1164 break;
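Note: conceptually, the spread adapter replaces one array reference with its elements in consecutive stack slots, after resizing the stack by length-1 slots (insert_arg_slots for length > 1, remove_arg_slots for length == 0). The net effect, sketched on a vector standing in for the stack:

    #include <cstdint>
    #include <vector>
    // Replace args[pos], a reference array, by its elements, as the
    // _adapter_opt_spread_* cases do on the interpreter stack.
    static void spread_arg(std::vector<intptr_t>& args, std::size_t pos,
                           const std::vector<intptr_t>& elems) {
      args.erase(args.begin() + pos);
      args.insert(args.begin() + pos, elems.begin(), elems.end());
    }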
1163 1165
1164 1166 case _adapter_flyby:
1165 1167 case _adapter_ricochet:
1166 1168 __ unimplemented(entry_name(ek)); // %%% FIXME: NYI
1167 1169 break;
1168 1170
1169 1171 default: ShouldNotReachHere();
1170 1172 }
1171 1173 __ hlt();
1172 1174
1173 1175 address me_cookie = MethodHandleEntry::start_compiled_entry(_masm, interp_entry);
1174 1176 __ unimplemented(entry_name(ek)); // %%% FIXME: NYI
1175 1177
1176 1178 init_entry(ek, MethodHandleEntry::finish_compiled_entry(_masm, me_cookie));
1177 1179 }