--- old/src/cpu/x86/vm/methodHandles_x86.cpp
+++ new/src/cpu/x86/vm/methodHandles_x86.cpp
1 1 /*
2 2 * Copyright 1997-2010 Sun Microsystems, Inc. All Rights Reserved.
3 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 4 *
5 5 * This code is free software; you can redistribute it and/or modify it
6 6 * under the terms of the GNU General Public License version 2 only, as
7 7 * published by the Free Software Foundation.
8 8 *
9 9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 12 * version 2 for more details (a copy is included in the LICENSE file that
13 13 * accompanied this code).
14 14 *
15 15 * You should have received a copy of the GNU General Public License version
16 16 * 2 along with this work; if not, write to the Free Software Foundation,
17 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 18 *
19 19 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
20 20 * CA 95054 USA or visit www.sun.com if you need additional information or
21 21 * have any questions.
22 22 *
23 23 */
24 24
25 25 #include "incls/_precompiled.incl"
26 26 #include "incls/_methodHandles_x86.cpp.incl"
27 27
28 28 #define __ _masm->
29 29
30 30 address MethodHandleEntry::start_compiled_entry(MacroAssembler* _masm,
31 31 address interpreted_entry) {
32 32 // Just before the actual machine code entry point, allocate space
33 33 // for a MethodHandleEntry::Data record, so that we can manage everything
34 34 // from one base pointer.
35 35 __ align(wordSize);
36 36 address target = __ pc() + sizeof(Data);
37 37 while (__ pc() < target) {
38 38 __ nop();
39 39 __ align(wordSize);
40 40 }
41 41
42 42 MethodHandleEntry* me = (MethodHandleEntry*) __ pc();
43 43 me->set_end_address(__ pc()); // set a temporary end_address
44 44 me->set_from_interpreted_entry(interpreted_entry);
45 45 me->set_type_checking_entry(NULL);
46 46
47 47 return (address) me;
48 48 }
49 49
50 50 MethodHandleEntry* MethodHandleEntry::finish_compiled_entry(MacroAssembler* _masm,
51 51 address start_addr) {
52 52 MethodHandleEntry* me = (MethodHandleEntry*) start_addr;
53 53 assert(me->end_address() == start_addr, "valid ME");
54 54
55 55 // Fill in the real end_address:
56 56 __ align(wordSize);
57 57 me->set_end_address(__ pc());
58 58
59 59 return me;
60 60 }
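These two helpers bracket every stub emitted in this file; the driver at the end of generate_method_handle_stub() (below) uses them essentially as follows. Sketch only, mirroring calls that appear later in this file:

      address cookie = MethodHandleEntry::start_compiled_entry(_masm, interp_entry);
      // ... emit the entry-specific stub body into _masm ...
      init_entry(ek, MethodHandleEntry::finish_compiled_entry(_masm, cookie));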
61 61
62 62 #ifdef ASSERT
63 63 static void verify_argslot(MacroAssembler* _masm, Register argslot_reg,
64 64 const char* error_message) {
65 65 // Verify that argslot lies within (rsp, rbp].
66 66 Label L_ok, L_bad;
67 67 __ cmpptr(argslot_reg, rbp);
68 68 __ jccb(Assembler::above, L_bad);
69 69 __ cmpptr(rsp, argslot_reg);
70 70 __ jccb(Assembler::below, L_ok);
71 71 __ bind(L_bad);
72 72 __ stop(error_message);
73 73 __ bind(L_ok);
74 74 }
75 75 #endif
76 76
77 77
78 78 // Code generation
79 79 address MethodHandles::generate_method_handle_interpreter_entry(MacroAssembler* _masm) {
80 80 // rbx: methodOop
81 81 // rcx: receiver method handle (must load from sp[MethodTypeForm.vmslots])
82 82 // rsi/r13: sender SP (must preserve; see prepare_to_jump_from_interpreted)
83 83 // rdx: garbage temp, blown away
84 84
85 85 Register rbx_method = rbx;
86 86 Register rcx_recv = rcx;
87 87 Register rax_mtype = rax;
88 88 Register rdx_temp = rdx;
89 89
90 90 // emit WrongMethodType path first, to enable jccb back-branch from main path
91 91 Label wrong_method_type;
92 92 __ bind(wrong_method_type);
93 93 __ push(rax_mtype); // required mtype
94 94 __ push(rcx_recv); // bad mh (1st stacked argument)
95 95 __ jump(ExternalAddress(Interpreter::throw_WrongMethodType_entry()));
96 96
97 97 // here's where control starts out:
98 98 __ align(CodeEntryAlignment);
99 99 address entry_point = __ pc();
100 100
101 101 // fetch the MethodType from the method handle into rax (the 'check' register)
102 102 {
103 103 Register tem = rbx_method;
104 104 for (jint* pchase = methodOopDesc::method_type_offsets_chain(); (*pchase) != -1; pchase++) {
105 105 __ movptr(rax_mtype, Address(tem, *pchase));
106 106 tem = rax_mtype; // in case there is another indirection
107 107 }
108 108 }
109 109 Register rbx_temp = rbx_method; // done with incoming methodOop
110 110
111 111 // given the MethodType, find out where the MH argument is buried
112 112 __ movptr(rdx_temp, Address(rax_mtype,
113 113 __ delayed_value(java_dyn_MethodType::form_offset_in_bytes, rbx_temp)));
114 114 __ movl(rdx_temp, Address(rdx_temp,
115 115 __ delayed_value(java_dyn_MethodTypeForm::vmslots_offset_in_bytes, rbx_temp)));
116 116 __ movptr(rcx_recv, __ argument_address(rdx_temp));
117 117
118 118 __ check_method_handle_type(rax_mtype, rcx_recv, rdx_temp, wrong_method_type);
119 119 __ jump_to_method_handle_entry(rcx_recv, rdx_temp);
120 120
121 121 return entry_point;
122 122 }
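The offset-chain loop above is dense; as a standalone C++ sketch (illustrative types, not the VM's oop machinery), the emitted loads compute:

      // chain is the -1-terminated list of byte offsets returned by
      // methodOopDesc::method_type_offsets_chain()
      typedef unsigned char* addr_t;
      static addr_t walk_method_type_chain(addr_t method, const int* chain) {
        addr_t p = method;
        for (const int* off = chain; *off != -1; off++)
          p = *(addr_t*)(p + *off);     // movptr(rax_mtype, Address(tem, *pchase))
        return p;                       // ends at the MethodType
      }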
123 123
124 124 // Helper to insert argument slots into the stack.
125 125 // arg_slots must be a multiple of stack_move_unit() and <= 0
126 126 void MethodHandles::insert_arg_slots(MacroAssembler* _masm,
127 127 RegisterOrConstant arg_slots,
128 128 int arg_mask,
129 129 Register rax_argslot,
130 - Register rbx_temp, Register rdx_temp) {
130 + Register rbx_temp, Register rdx_temp, Register temp3_reg) {
131 + assert(temp3_reg == noreg, "temp3 not required");
131 132 assert_different_registers(rax_argslot, rbx_temp, rdx_temp,
132 133 (!arg_slots.is_register() ? rsp : arg_slots.as_register()));
133 134
134 135 #ifdef ASSERT
135 136 verify_argslot(_masm, rax_argslot, "insertion point must fall within current frame");
136 137 if (arg_slots.is_register()) {
137 138 Label L_ok, L_bad;
138 139 __ cmpptr(arg_slots.as_register(), (int32_t) NULL_WORD);
139 140 __ jccb(Assembler::greater, L_bad);
140 141 __ testl(arg_slots.as_register(), -stack_move_unit() - 1);
141 142 __ jccb(Assembler::zero, L_ok);
142 143 __ bind(L_bad);
143 144 __ stop("assert arg_slots <= 0 and clear low bits");
144 145 __ bind(L_ok);
145 146 } else {
146 147 assert(arg_slots.as_constant() <= 0, "");
147 148 assert(arg_slots.as_constant() % -stack_move_unit() == 0, "");
148 149 }
149 150 #endif //ASSERT
150 151
151 152 #ifdef _LP64
152 153 if (arg_slots.is_register()) {
153 154 // clean high bits of stack motion register (was loaded as an int)
154 155 __ movslq(arg_slots.as_register(), arg_slots.as_register());
155 156 }
156 157 #endif
157 158
158 159 // Make space on the stack for the inserted argument(s).
159 160 // Then pull down everything shallower than rax_argslot.
160 161 // The stacked return address gets pulled down with everything else.
161 162 // That is, copy [rsp, argslot) downward by -size words. In pseudo-code:
162 163 // rsp -= size;
163 164 // for (rdx = rsp + size; rdx < argslot; rdx++)
164 165 // rdx[-size] = rdx[0]
165 166 // argslot -= size;
166 167 __ mov(rdx_temp, rsp); // source pointer for copy
167 168 __ lea(rsp, Address(rsp, arg_slots, Address::times_ptr));
168 169 {
169 170 Label loop;
170 171 __ bind(loop);
171 172 // pull one word down each time through the loop
172 173 __ movptr(rbx_temp, Address(rdx_temp, 0));
173 174 __ movptr(Address(rdx_temp, arg_slots, Address::times_ptr), rbx_temp);
174 175 __ addptr(rdx_temp, wordSize);
175 176 __ cmpptr(rdx_temp, rax_argslot);
176 177 __ jccb(Assembler::less, loop);
177 178 }
178 179
179 180 // Now move the argslot down, to point to the opened-up space.
180 181 __ lea(rax_argslot, Address(rax_argslot, arg_slots, Address::times_ptr));
181 182 }
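The copy loop above behaves like a word-granular memmove toward lower addresses. A standalone illustration (not HotSpot code; 'slots' here is the positive number of words to open, whereas the generated code receives it as a non-positive arg_slots):

      #include <cstring>
      // stack[] is indexed so that lower indices are lower addresses (the rsp side)
      static void open_slots(long* stack, int sp, int argslot, int slots) {
        // slide [sp, argslot) down by 'slots' words; the stacked return address moves too
        std::memmove(&stack[sp - slots], &stack[sp], (argslot - sp) * sizeof(long));
        // callers then use sp - slots as the new sp and argslot - slots as the new argslot
      }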
182 183
183 184 // Helper to remove argument slots from the stack.
184 185 // arg_slots must be a multiple of stack_move_unit() and >= 0
185 186 void MethodHandles::remove_arg_slots(MacroAssembler* _masm,
186 187 RegisterOrConstant arg_slots,
187 188 Register rax_argslot,
188 - Register rbx_temp, Register rdx_temp) {
189 + Register rbx_temp, Register rdx_temp, Register temp3_reg) {
190 + assert(temp3_reg == noreg, "temp3 not required");
189 191 assert_different_registers(rax_argslot, rbx_temp, rdx_temp,
190 192 (!arg_slots.is_register() ? rsp : arg_slots.as_register()));
191 193
192 194 #ifdef ASSERT
193 195 // Verify that [argslot..argslot+size) lies within (rsp, rbp).
194 196 __ lea(rbx_temp, Address(rax_argslot, arg_slots, Address::times_ptr));
195 197 verify_argslot(_masm, rbx_temp, "deleted argument(s) must fall within current frame");
196 198 if (arg_slots.is_register()) {
197 199 Label L_ok, L_bad;
198 200 __ cmpptr(arg_slots.as_register(), (int32_t) NULL_WORD);
199 201 __ jccb(Assembler::less, L_bad);
200 202 __ testl(arg_slots.as_register(), -stack_move_unit() - 1);
201 203 __ jccb(Assembler::zero, L_ok);
202 204 __ bind(L_bad);
203 205 __ stop("assert arg_slots >= 0 and clear low bits");
204 206 __ bind(L_ok);
205 207 } else {
206 208 assert(arg_slots.as_constant() >= 0, "");
207 209 assert(arg_slots.as_constant() % -stack_move_unit() == 0, "");
208 210 }
209 211 #endif //ASSERT
210 212
211 213 #ifdef _LP64
212 214 if (false) { // not needed, since register is positive
213 215 // clean high bits of stack motion register (was loaded as an int)
214 216 if (arg_slots.is_register())
215 217 __ movslq(arg_slots.as_register(), arg_slots.as_register());
216 218 }
217 219 #endif
218 220
219 221 // Pull up everything shallower than rax_argslot.
220 222 // Then remove the excess space on the stack.
221 223 // The stacked return address gets pulled up with everything else.
222 224 // That is, copy [rsp, argslot) upward by size words. In pseudo-code:
223 225 // for (rdx = argslot-1; rdx >= rsp; --rdx)
224 226 // rdx[size] = rdx[0]
225 227 // argslot += size;
226 228 // rsp += size;
227 229 __ lea(rdx_temp, Address(rax_argslot, -wordSize)); // source pointer for copy
228 230 {
229 231 Label loop;
230 232 __ bind(loop);
231 233 // pull one word up each time through the loop
232 234 __ movptr(rbx_temp, Address(rdx_temp, 0));
233 235 __ movptr(Address(rdx_temp, arg_slots, Address::times_ptr), rbx_temp);
234 236 __ addptr(rdx_temp, -wordSize);
235 237 __ cmpptr(rdx_temp, rsp);
236 238 __ jccb(Assembler::greaterEqual, loop);
237 239 }
238 240
239 241 // Now move the argslot up, to point to the just-copied block.
240 242 __ lea(rsp, Address(rsp, arg_slots, Address::times_ptr));
241 243 // And adjust the argslot address to point at the deletion point.
242 244 __ lea(rax_argslot, Address(rax_argslot, arg_slots, Address::times_ptr));
243 245 }
244 246
245 247 #ifndef PRODUCT
246 248 extern "C" void print_method_handle(oop mh);
247 249 void trace_method_handle_stub(const char* adaptername,
248 250 oop mh,
249 251 intptr_t* entry_sp,
250 252 intptr_t* saved_sp,
251 253 intptr_t* saved_bp) {
252 254 // called as a leaf from native code: do not block the JVM!
253 255 intptr_t* last_sp = (intptr_t*) saved_bp[frame::interpreter_frame_last_sp_offset];
254 256 intptr_t* base_sp = (intptr_t*) saved_bp[frame::interpreter_frame_monitor_block_top_offset];
255 257 printf("MH %s mh="INTPTR_FORMAT" sp=("INTPTR_FORMAT"+"INTX_FORMAT") stack_size="INTX_FORMAT" bp="INTPTR_FORMAT"\n",
256 258 adaptername, (intptr_t)mh, (intptr_t)entry_sp, (intptr_t)(saved_sp - entry_sp), (intptr_t)(base_sp - last_sp), (intptr_t)saved_bp);
257 259 if (last_sp != saved_sp)
258 260 printf("*** last_sp="INTPTR_FORMAT"\n", (intptr_t)last_sp);
259 261 if (Verbose) print_method_handle(mh);
260 262 }
261 263 #endif //PRODUCT
262 264
263 265 // Generate an "entry" field for a method handle.
264 266 // This determines how the method handle will respond to calls.
265 267 void MethodHandles::generate_method_handle_stub(MacroAssembler* _masm, MethodHandles::EntryKind ek) {
266 268 // Here is the register state during an interpreted call,
267 269 // as set up by generate_method_handle_interpreter_entry():
268 270 // - rbx: garbage temp (was MethodHandle.invoke methodOop, unused)
269 271 // - rcx: receiver method handle
270 272 // - rax: method handle type (only used by the check_mtype entry point)
271 273 // - rsi/r13: sender SP (must preserve; see prepare_to_jump_from_interpreted)
272 274 // - rdx: garbage temp, can blow away
273 275
274 276 Register rcx_recv = rcx;
275 277 Register rax_argslot = rax;
276 278 Register rbx_temp = rbx;
277 279 Register rdx_temp = rdx;
278 280
279 281 // This guy is set up by prepare_to_jump_from_interpreted (from interpreted calls)
280 282 // and gen_c2i_adapter (from compiled calls):
281 283 Register saved_last_sp = LP64_ONLY(r13) NOT_LP64(rsi);
282 284
283 285 guarantee(java_dyn_MethodHandle::vmentry_offset_in_bytes() != 0, "must have offsets");
284 286
285 287 // some handy addresses
286 288 Address rbx_method_fie( rbx, methodOopDesc::from_interpreted_offset() );
287 289
288 290 Address rcx_mh_vmtarget( rcx_recv, java_dyn_MethodHandle::vmtarget_offset_in_bytes() );
289 291 Address rcx_dmh_vmindex( rcx_recv, sun_dyn_DirectMethodHandle::vmindex_offset_in_bytes() );
290 292
291 293 Address rcx_bmh_vmargslot( rcx_recv, sun_dyn_BoundMethodHandle::vmargslot_offset_in_bytes() );
292 294 Address rcx_bmh_argument( rcx_recv, sun_dyn_BoundMethodHandle::argument_offset_in_bytes() );
293 295
294 296 Address rcx_amh_vmargslot( rcx_recv, sun_dyn_AdapterMethodHandle::vmargslot_offset_in_bytes() );
295 297 Address rcx_amh_argument( rcx_recv, sun_dyn_AdapterMethodHandle::argument_offset_in_bytes() );
296 298 Address rcx_amh_conversion( rcx_recv, sun_dyn_AdapterMethodHandle::conversion_offset_in_bytes() );
297 299 Address vmarg; // __ argument_address(vmargslot)
298 300
299 301 const int java_mirror_offset = klassOopDesc::klass_part_offset_in_bytes() + Klass::java_mirror_offset_in_bytes();
300 302
301 303 if (have_entry(ek)) {
302 304 __ nop(); // empty stubs make SG sick
303 305 return;
304 306 }
305 307
306 308 address interp_entry = __ pc();
307 309 if (UseCompressedOops) __ unimplemented("UseCompressedOops");
308 310
309 311 #ifndef PRODUCT
310 312 if (TraceMethodHandles) {
311 313 __ push(rax); __ push(rbx); __ push(rcx); __ push(rdx); __ push(rsi); __ push(rdi);
312 314 __ lea(rax, Address(rsp, wordSize*6)); // entry_sp
313 315 // arguments:
314 316 __ push(rbp); // interpreter frame pointer
315 317 __ push(rsi); // saved_sp
316 318 __ push(rax); // entry_sp
317 319 __ push(rcx); // mh
318 320 __ push(rcx);
319 321 __ movptr(Address(rsp, 0), (intptr_t)entry_name(ek));
320 322 __ call_VM_leaf(CAST_FROM_FN_PTR(address, trace_method_handle_stub), 5);
321 323 __ pop(rdi); __ pop(rsi); __ pop(rdx); __ pop(rcx); __ pop(rbx); __ pop(rax);
322 324 }
323 325 #endif //PRODUCT
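The first six pushes save scratch registers; the next five pushes/stores lay out the C argument list, so, at least under the 32-bit C calling convention, the leaf call above is equivalent to:

      trace_method_handle_stub(entry_name(ek), mh, entry_sp, saved_sp, saved_bp);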
324 326
325 327 switch ((int) ek) {
326 328 case _raise_exception:
327 329 {
328 330 // Not a real MH entry, but rather shared code for raising an exception.
329 331 // Extra local arguments are pushed on stack, as required type at TOS+8,
330 332 // failing object (or NULL) at TOS+4, failing bytecode type at TOS.
331 333 // Beyond those local arguments is the PC, of course.
332 334 Register rdx_code = rdx_temp;
333 335 Register rcx_fail = rcx_recv;
334 336 Register rax_want = rax_argslot;
335 337 Register rdi_pc = rdi;
336 338 __ pop(rdx_code); // TOS+0
337 339 __ pop(rcx_fail); // TOS+4
338 340 __ pop(rax_want); // TOS+8
339 341 __ pop(rdi_pc); // caller PC
340 342
341 343 __ mov(rsp, rsi); // cut the stack back to where the caller started
342 344
343 345 // Repush the arguments as if coming from the interpreter.
344 346 __ push(rdx_code);
345 347 __ push(rcx_fail);
346 348 __ push(rax_want);
347 349
348 350 Register rbx_method = rbx_temp;
349 351 Label no_method;
350 352 // FIXME: fill in _raise_exception_method with a suitable sun.dyn method
351 353 __ movptr(rbx_method, ExternalAddress((address) &_raise_exception_method));
352 354 __ testptr(rbx_method, rbx_method);
353 355 __ jccb(Assembler::zero, no_method);
354 356 int jobject_oop_offset = 0;
355 357 __ movptr(rbx_method, Address(rbx_method, jobject_oop_offset)); // dereference the jobject
356 358 __ testptr(rbx_method, rbx_method);
357 359 __ jccb(Assembler::zero, no_method);
358 360 __ verify_oop(rbx_method);
359 361 __ push(rdi_pc); // and restore caller PC
360 362 __ jmp(rbx_method_fie);
361 363
362 364 // If we get here, the Java runtime did not do its job of creating the exception.
363 365 // Do something that at least causes a valid throw from the interpreter.
364 366 __ bind(no_method);
365 367 __ pop(rax_want);
366 368 __ pop(rcx_fail);
367 369 __ push(rax_want);
368 370 __ push(rcx_fail);
369 371 __ jump(ExternalAddress(Interpreter::throw_WrongMethodType_entry()));
370 372 }
371 373 break;
372 374
373 375 case _invokestatic_mh:
374 376 case _invokespecial_mh:
375 377 {
376 378 Register rbx_method = rbx_temp;
377 379 __ movptr(rbx_method, rcx_mh_vmtarget); // target is a methodOop
378 380 __ verify_oop(rbx_method);
379 381 // same as TemplateTable::invokestatic or invokespecial,
380 382 // minus the CP setup and profiling:
381 383 if (ek == _invokespecial_mh) {
382 384 // Must load & check the first argument before entering the target method.
383 385 __ load_method_handle_vmslots(rax_argslot, rcx_recv, rdx_temp);
384 386 __ movptr(rcx_recv, __ argument_address(rax_argslot, -1));
385 387 __ null_check(rcx_recv);
386 388 __ verify_oop(rcx_recv);
387 389 }
388 390 __ jmp(rbx_method_fie);
389 391 }
390 392 break;
391 393
392 394 case _invokevirtual_mh:
393 395 {
394 396 // same as TemplateTable::invokevirtual,
395 397 // minus the CP setup and profiling:
396 398
397 399 // pick out the vtable index and receiver offset from the MH,
398 400 // and then we can discard it:
399 401 __ load_method_handle_vmslots(rax_argslot, rcx_recv, rdx_temp);
400 402 Register rbx_index = rbx_temp;
401 403 __ movl(rbx_index, rcx_dmh_vmindex);
402 404 // Note: The verifier allows us to ignore rcx_mh_vmtarget.
403 405 __ movptr(rcx_recv, __ argument_address(rax_argslot, -1));
404 406 __ null_check(rcx_recv, oopDesc::klass_offset_in_bytes());
405 407
406 408 // get receiver klass
407 409 Register rax_klass = rax_argslot;
408 410 __ load_klass(rax_klass, rcx_recv);
409 411 __ verify_oop(rax_klass);
410 412
411 413 // get target methodOop & entry point
412 414 const int base = instanceKlass::vtable_start_offset() * wordSize;
413 415 assert(vtableEntry::size() * wordSize == wordSize, "adjust the scaling in the code below");
414 416 Address vtable_entry_addr(rax_klass,
415 417 rbx_index, Address::times_ptr,
416 418 base + vtableEntry::method_offset_in_bytes());
417 419 Register rbx_method = rbx_temp;
418 420 __ movptr(rbx_method, vtable_entry_addr);
419 421
420 422 __ verify_oop(rbx_method);
421 423 __ jmp(rbx_method_fie);
422 424 }
423 425 break;
424 426
425 427 case _invokeinterface_mh:
426 428 {
427 429 // same as TemplateTable::invokeinterface,
428 430 // minus the CP setup and profiling:
429 431
430 432 // pick out the interface and itable index from the MH.
431 433 __ load_method_handle_vmslots(rax_argslot, rcx_recv, rdx_temp);
432 434 Register rdx_intf = rdx_temp;
433 435 Register rbx_index = rbx_temp;
434 436 __ movptr(rdx_intf, rcx_mh_vmtarget);
435 437 __ movl(rbx_index, rcx_dmh_vmindex);
436 438 __ movptr(rcx_recv, __ argument_address(rax_argslot, -1));
437 439 __ null_check(rcx_recv, oopDesc::klass_offset_in_bytes());
438 440
439 441 // get receiver klass
440 442 Register rax_klass = rax_argslot;
441 443 __ load_klass(rax_klass, rcx_recv);
442 444 __ verify_oop(rax_klass);
443 445
444 446 Register rdi_temp = rdi;
445 447 Register rbx_method = rbx_index;
446 448
447 449 // get interface klass
448 450 Label no_such_interface;
449 451 __ verify_oop(rdx_intf);
450 452 __ lookup_interface_method(rax_klass, rdx_intf,
451 453 // note: next two args must be the same:
452 454 rbx_index, rbx_method,
453 455 rdi_temp,
454 456 no_such_interface);
455 457
456 458 __ verify_oop(rbx_method);
457 459 __ jmp(rbx_method_fie);
458 460 __ hlt();
459 461
460 462 __ bind(no_such_interface);
461 463 // Throw an exception.
462 464 // For historical reasons, it will be IncompatibleClassChangeError.
463 465 __ pushptr(Address(rdx_intf, java_mirror_offset)); // required interface
464 466 __ push(rcx_recv); // bad receiver
465 467 __ push((int)Bytecodes::_invokeinterface); // who is complaining?
466 468 __ jump(ExternalAddress(from_interpreted_entry(_raise_exception)));
467 469 }
468 470 break;
469 471
470 472 case _bound_ref_mh:
471 473 case _bound_int_mh:
472 474 case _bound_long_mh:
473 475 case _bound_ref_direct_mh:
474 476 case _bound_int_direct_mh:
475 477 case _bound_long_direct_mh:
476 478 {
477 479 bool direct_to_method = (ek >= _bound_ref_direct_mh);
478 480 BasicType arg_type = T_ILLEGAL;
479 481 int arg_mask = _INSERT_NO_MASK;
480 482 int arg_slots = -1;
481 483 get_ek_bound_mh_info(ek, arg_type, arg_mask, arg_slots);
482 484
483 485 // make room for the new argument:
484 486 __ movl(rax_argslot, rcx_bmh_vmargslot);
485 487 __ lea(rax_argslot, __ argument_address(rax_argslot));
486 488 insert_arg_slots(_masm, arg_slots * stack_move_unit(), arg_mask,
487 489 rax_argslot, rbx_temp, rdx_temp);
488 490
489 491 // store bound argument into the new stack slot:
490 492 __ movptr(rbx_temp, rcx_bmh_argument);
491 493 Address prim_value_addr(rbx_temp, java_lang_boxing_object::value_offset_in_bytes(arg_type));
492 494 if (arg_type == T_OBJECT) {
493 495 __ movptr(Address(rax_argslot, 0), rbx_temp);
494 496 } else {
495 497 __ load_sized_value(rdx_temp, prim_value_addr,
496 498 type2aelembytes(arg_type), is_signed_subword_type(arg_type));
497 499 __ movptr(Address(rax_argslot, 0), rdx_temp);
498 500 #ifndef _LP64
499 501 if (arg_slots == 2) {
500 502 __ movl(rdx_temp, prim_value_addr.plus_disp(wordSize));
501 503 __ movl(Address(rax_argslot, Interpreter::stackElementSize()), rdx_temp);
502 504 }
503 505 #endif //_LP64
504 506 }
505 507
506 508 if (direct_to_method) {
507 509 Register rbx_method = rbx_temp;
508 510 __ movptr(rbx_method, rcx_mh_vmtarget);
509 511 __ verify_oop(rbx_method);
510 512 __ jmp(rbx_method_fie);
511 513 } else {
512 514 __ movptr(rcx_recv, rcx_mh_vmtarget);
513 515 __ verify_oop(rcx_recv);
514 516 __ jump_to_method_handle_entry(rcx_recv, rdx_temp);
515 517 }
516 518 }
517 519 break;
518 520
519 521 case _adapter_retype_only:
520 522 case _adapter_retype_raw:
521 523 // immediately jump to the next MH layer:
522 524 __ movptr(rcx_recv, rcx_mh_vmtarget);
523 525 __ verify_oop(rcx_recv);
524 526 __ jump_to_method_handle_entry(rcx_recv, rdx_temp);
525 527 // This is OK when all parameter types widen.
526 528 // It is also OK when a return type narrows.
527 529 break;
528 530
529 531 case _adapter_check_cast:
530 532 {
531 533 // temps:
532 534 Register rbx_klass = rbx_temp; // interesting AMH data
533 535
534 536 // check a reference argument before jumping to the next layer of MH:
535 537 __ movl(rax_argslot, rcx_amh_vmargslot);
536 538 vmarg = __ argument_address(rax_argslot);
537 539
538 540 // What class are we casting to?
539 541 __ movptr(rbx_klass, rcx_amh_argument); // this is a Class object!
540 542 __ movptr(rbx_klass, Address(rbx_klass, java_lang_Class::klass_offset_in_bytes()));
541 543
542 544 Label done;
543 545 __ movptr(rdx_temp, vmarg);
544 546 __ testptr(rdx_temp, rdx_temp);
545 547 __ jccb(Assembler::zero, done); // no cast if null
546 548 __ load_klass(rdx_temp, rdx_temp);
547 549
548 550 // live at this point:
549 551 // - rbx_klass: klass required by the target method
550 552 // - rdx_temp: argument klass to test
551 553 // - rcx_recv: adapter method handle
552 554 __ check_klass_subtype(rdx_temp, rbx_klass, rax_argslot, done);
553 555
554 556 // If we get here, the type check failed!
555 557 // Call the wrong_method_type stub, passing the failing argument type in rax.
556 558 Register rax_mtype = rax_argslot;
557 559 __ movl(rax_argslot, rcx_amh_vmargslot); // reload argslot field
558 560 __ movptr(rdx_temp, vmarg);
559 561
560 562 __ pushptr(rcx_amh_argument); // required class
561 563 __ push(rdx_temp); // bad object
562 564 __ push((int)Bytecodes::_checkcast); // who is complaining?
563 565 __ jump(ExternalAddress(from_interpreted_entry(_raise_exception)));
564 566
565 567 __ bind(done);
566 568 // get the new MH:
567 569 __ movptr(rcx_recv, rcx_mh_vmtarget);
568 570 __ jump_to_method_handle_entry(rcx_recv, rdx_temp);
569 571 }
570 572 break;
571 573
572 574 case _adapter_prim_to_prim:
573 575 case _adapter_ref_to_prim:
574 576 // handled completely by optimized cases
575 577 __ stop("init_AdapterMethodHandle should not issue this");
576 578 break;
577 579
578 580 case _adapter_opt_i2i: // optimized subcase of adapt_prim_to_prim
579 581 //case _adapter_opt_f2i: // optimized subcase of adapt_prim_to_prim
580 582 case _adapter_opt_l2i: // optimized subcase of adapt_prim_to_prim
581 583 case _adapter_opt_unboxi: // optimized subcase of adapt_ref_to_prim
582 584 {
583 585 // perform an in-place conversion to int or an int subword
584 586 __ movl(rax_argslot, rcx_amh_vmargslot);
585 587 vmarg = __ argument_address(rax_argslot);
586 588
587 589 switch (ek) {
588 590 case _adapter_opt_i2i:
589 591 __ movl(rdx_temp, vmarg);
590 592 break;
591 593 case _adapter_opt_l2i:
592 594 {
593 595 // just delete the extra slot; on a little-endian machine we keep the first
594 596 __ lea(rax_argslot, __ argument_address(rax_argslot, 1));
595 597 remove_arg_slots(_masm, -stack_move_unit(),
596 598 rax_argslot, rbx_temp, rdx_temp);
597 599 vmarg = Address(rax_argslot, -Interpreter::stackElementSize());
598 600 __ movl(rdx_temp, vmarg);
599 601 }
600 602 break;
601 603 case _adapter_opt_unboxi:
602 604 {
603 605 // Load the value up from the heap.
604 606 __ movptr(rdx_temp, vmarg);
605 607 int value_offset = java_lang_boxing_object::value_offset_in_bytes(T_INT);
606 608 #ifdef ASSERT
607 609 for (int bt = T_BOOLEAN; bt < T_INT; bt++) {
608 610 if (is_subword_type(BasicType(bt)))
609 611 assert(value_offset == java_lang_boxing_object::value_offset_in_bytes(BasicType(bt)), "");
610 612 }
611 613 #endif
612 614 __ null_check(rdx_temp, value_offset);
613 615 __ movl(rdx_temp, Address(rdx_temp, value_offset));
614 616 // We load this as a word. Because we are little-endian,
615 617 // the low bits will be correct, but the high bits may need cleaning.
616 618 // The vminfo will guide us to clean those bits.
617 619 }
618 620 break;
619 621 default:
620 622 ShouldNotReachHere();
621 623 }
622 624
623 625 // Do the requested conversion and store the value.
624 626 Register rbx_vminfo = rbx_temp;
625 627 __ movl(rbx_vminfo, rcx_amh_conversion);
626 628 assert(CONV_VMINFO_SHIFT == 0, "preshifted");
627 629
628 630 // get the new MH:
629 631 __ movptr(rcx_recv, rcx_mh_vmtarget);
630 632 // (now we are done with the old MH)
631 633
632 634 // original 32-bit vmdata word must be of this form:
633 635 // | MBZ:6 | signBitCount:8 | srcDstTypes:8 | conversionOp:8 |
634 636 __ xchgptr(rcx, rbx_vminfo); // free rcx for shifts
635 637 __ shll(rdx_temp /*, rcx*/);
636 638 Label zero_extend, done;
637 639 __ testl(rcx, CONV_VMINFO_SIGN_FLAG);
638 640 __ jccb(Assembler::zero, zero_extend);
639 641
640 642 // this path is taken for int->byte, int->short
641 643 __ sarl(rdx_temp /*, rcx*/);
642 644 __ jmpb(done);
643 645
644 646 __ bind(zero_extend);
645 647 // this is taken for int->char
646 648 __ shrl(rdx_temp /*, rcx*/);
647 649
648 650 __ bind(done);
649 651 __ movl(vmarg, rdx_temp); // Store the value.
650 652 __ xchgptr(rcx, rbx_vminfo); // restore rcx_recv
651 653
652 654 __ jump_to_method_handle_entry(rcx_recv, rdx_temp);
653 655 }
654 656 break;
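The shll/sarl/shrl sequence above performs the narrowing selected by vminfo. A standalone C++ illustration (not HotSpot code; it assumes vminfo is 32 minus the bit width of the destination subword type):

      #include <cstdint>
      static int32_t narrow_subword(int32_t x, int vminfo, bool sign_extend) {
        uint32_t up = (uint32_t)x << vminfo;                   // shll rdx_temp, cl
        return sign_extend ? (int32_t)((int32_t)up >> vminfo)  // sarl: int -> byte/short
                           : (int32_t)(up >> vminfo);          // shrl: int -> char
      }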
655 657
656 658 case _adapter_opt_i2l: // optimized subcase of adapt_prim_to_prim
657 659 case _adapter_opt_unboxl: // optimized subcase of adapt_ref_to_prim
658 660 {
659 661 // perform an in-place int-to-long or ref-to-long conversion
660 662 __ movl(rax_argslot, rcx_amh_vmargslot);
661 663
662 664 // on a little-endian machine we keep the first slot and add another after
663 665 __ lea(rax_argslot, __ argument_address(rax_argslot, 1));
664 666 insert_arg_slots(_masm, stack_move_unit(), _INSERT_INT_MASK,
665 667 rax_argslot, rbx_temp, rdx_temp);
666 668 Address vmarg1(rax_argslot, -Interpreter::stackElementSize());
667 669 Address vmarg2 = vmarg1.plus_disp(Interpreter::stackElementSize());
668 670
669 671 switch (ek) {
670 672 case _adapter_opt_i2l:
671 673 {
672 674 #ifdef _LP64
673 675 __ movslq(rdx_temp, vmarg1); // Load sign-extended
674 676 __ movq(vmarg1, rdx_temp); // Store into first slot
675 677 #else
676 678 __ movl(rdx_temp, vmarg1);
677 679 __ sarl(rdx_temp, BitsPerInt - 1); // __ extend_sign()
678 680 __ movl(vmarg2, rdx_temp); // store second word
679 681 #endif
680 682 }
681 683 break;
682 684 case _adapter_opt_unboxl:
683 685 {
684 686 // Load the value up from the heap.
685 687 __ movptr(rdx_temp, vmarg1);
686 688 int value_offset = java_lang_boxing_object::value_offset_in_bytes(T_LONG);
687 689 assert(value_offset == java_lang_boxing_object::value_offset_in_bytes(T_DOUBLE), "");
688 690 __ null_check(rdx_temp, value_offset);
689 691 #ifdef _LP64
690 692 __ movq(rbx_temp, Address(rdx_temp, value_offset));
691 693 __ movq(vmarg1, rbx_temp);
692 694 #else
693 695 __ movl(rbx_temp, Address(rdx_temp, value_offset + 0*BytesPerInt));
694 696 __ movl(rdx_temp, Address(rdx_temp, value_offset + 1*BytesPerInt));
695 697 __ movl(vmarg1, rbx_temp);
696 698 __ movl(vmarg2, rdx_temp);
697 699 #endif
698 700 }
699 701 break;
700 702 default:
701 703 ShouldNotReachHere();
702 704 }
703 705
704 706 __ movptr(rcx_recv, rcx_mh_vmtarget);
705 707 __ jump_to_method_handle_entry(rcx_recv, rdx_temp);
706 708 }
707 709 break;
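On 32-bit, the i2l branch above synthesizes the high word by replicating the sign bit of the low word; a minimal standalone illustration (not HotSpot code):

      #include <cstdint>
      static void split_i2l(int32_t x, int32_t& lo, int32_t& hi) {
        lo = x;                 // the first (little-endian) slot already holds the int
        hi = (x < 0) ? -1 : 0;  // the stub gets this via sarl(rdx_temp, BitsPerInt - 1)
      }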
708 710
709 711 case _adapter_opt_f2d: // optimized subcase of adapt_prim_to_prim
710 712 case _adapter_opt_d2f: // optimized subcase of adapt_prim_to_prim
711 713 {
712 714 // perform an in-place floating primitive conversion
713 715 __ movl(rax_argslot, rcx_amh_vmargslot);
714 716 __ lea(rax_argslot, __ argument_address(rax_argslot, 1));
715 717 if (ek == _adapter_opt_f2d) {
716 718 insert_arg_slots(_masm, stack_move_unit(), _INSERT_INT_MASK,
717 719 rax_argslot, rbx_temp, rdx_temp);
718 720 }
719 721 Address vmarg(rax_argslot, -Interpreter::stackElementSize());
720 722
721 723 #ifdef _LP64
722 724 if (ek == _adapter_opt_f2d) {
723 725 __ movflt(xmm0, vmarg);
724 726 __ cvtss2sd(xmm0, xmm0);
725 727 __ movdbl(vmarg, xmm0);
726 728 } else {
727 729 __ movdbl(xmm0, vmarg);
728 730 __ cvtsd2ss(xmm0, xmm0);
729 731 __ movflt(vmarg, xmm0);
730 732 }
731 733 #else //_LP64
732 734 if (ek == _adapter_opt_f2d) {
733 735 __ fld_s(vmarg); // load float to ST0
734 736 __ fstp_d(vmarg); // store double
735 737 } else {
736 738 __ fld_d(vmarg); // load double to ST0
737 739 __ fstp_s(vmarg); // store single
738 740 }
739 741 #endif //_LP64
740 742
741 743 if (ek == _adapter_opt_d2f) {
742 744 remove_arg_slots(_masm, -stack_move_unit(),
743 745 rax_argslot, rbx_temp, rdx_temp);
744 746 }
745 747
746 748 __ movptr(rcx_recv, rcx_mh_vmtarget);
747 749 __ jump_to_method_handle_entry(rcx_recv, rdx_temp);
748 750 }
749 751 break;
750 752
751 753 case _adapter_prim_to_ref:
752 754 __ unimplemented(entry_name(ek)); // %%% FIXME: NYI
753 755 break;
754 756
755 757 case _adapter_swap_args:
756 758 case _adapter_rot_args:
757 759 // handled completely by optimized cases
758 760 __ stop("init_AdapterMethodHandle should not issue this");
759 761 break;
760 762
761 763 case _adapter_opt_swap_1:
762 764 case _adapter_opt_swap_2:
763 765 case _adapter_opt_rot_1_up:
764 766 case _adapter_opt_rot_1_down:
765 767 case _adapter_opt_rot_2_up:
766 768 case _adapter_opt_rot_2_down:
767 769 {
768 770 int swap_bytes = 0, rotate = 0;
769 771 get_ek_adapter_opt_swap_rot_info(ek, swap_bytes, rotate);
770 772
771 773 // 'argslot' is the position of the first argument to swap
772 774 __ movl(rax_argslot, rcx_amh_vmargslot);
773 775 __ lea(rax_argslot, __ argument_address(rax_argslot));
774 776
775 777 // 'vminfo' is the second
776 778 Register rbx_destslot = rbx_temp;
777 779 __ movl(rbx_destslot, rcx_amh_conversion);
778 780 assert(CONV_VMINFO_SHIFT == 0, "preshifted");
779 781 __ andl(rbx_destslot, CONV_VMINFO_MASK);
780 782 __ lea(rbx_destslot, __ argument_address(rbx_destslot));
781 783 DEBUG_ONLY(verify_argslot(_masm, rbx_destslot, "swap point must fall within current frame"));
782 784
783 785 if (!rotate) {
784 786 for (int i = 0; i < swap_bytes; i += wordSize) {
785 787 __ movptr(rdx_temp, Address(rax_argslot , i));
786 788 __ push(rdx_temp);
787 789 __ movptr(rdx_temp, Address(rbx_destslot, i));
788 790 __ movptr(Address(rax_argslot, i), rdx_temp);
789 791 __ pop(rdx_temp);
790 792 __ movptr(Address(rbx_destslot, i), rdx_temp);
791 793 }
792 794 } else {
793 795 // push the first chunk, which is going to get overwritten
794 796 for (int i = swap_bytes; (i -= wordSize) >= 0; ) {
795 797 __ movptr(rdx_temp, Address(rax_argslot, i));
796 798 __ push(rdx_temp);
797 799 }
798 800
799 801 if (rotate > 0) {
800 802 // rotate upward
801 803 __ subptr(rax_argslot, swap_bytes);
802 804 #ifdef ASSERT
803 805 {
804 806 // Verify that argslot > destslot, by at least swap_bytes.
805 807 Label L_ok;
806 808 __ cmpptr(rax_argslot, rbx_destslot);
807 809 __ jccb(Assembler::aboveEqual, L_ok);
808 810 __ stop("source must be above destination (upward rotation)");
809 811 __ bind(L_ok);
810 812 }
811 813 #endif
812 814 // work argslot down to destslot, copying contiguous data upwards
813 815 // pseudo-code:
814 816 // rax = src_addr - swap_bytes
815 817 // rbx = dest_addr
816 818 // while (rax >= rbx) *(rax + swap_bytes) = *(rax + 0), rax--;
817 819 Label loop;
818 820 __ bind(loop);
819 821 __ movptr(rdx_temp, Address(rax_argslot, 0));
820 822 __ movptr(Address(rax_argslot, swap_bytes), rdx_temp);
821 823 __ addptr(rax_argslot, -wordSize);
822 824 __ cmpptr(rax_argslot, rbx_destslot);
823 825 __ jccb(Assembler::aboveEqual, loop);
824 826 } else {
825 827 __ addptr(rax_argslot, swap_bytes);
826 828 #ifdef ASSERT
827 829 {
828 830 // Verify that argslot < destslot, by at least swap_bytes.
829 831 Label L_ok;
830 832 __ cmpptr(rax_argslot, rbx_destslot);
831 833 __ jccb(Assembler::belowEqual, L_ok);
832 834 __ stop("source must be below destination (downward rotation)");
833 835 __ bind(L_ok);
834 836 }
835 837 #endif
836 838 // work argslot up to destslot, copying contiguous data downwards
837 839 // pseudo-code:
838 840 // rax = src_addr + swap_bytes
839 841 // rbx = dest_addr
840 842 // while (rax <= rbx) *(rax - swap_bytes) = *(rax + 0), rax++;
841 843 Label loop;
842 844 __ bind(loop);
843 845 __ movptr(rdx_temp, Address(rax_argslot, 0));
844 846 __ movptr(Address(rax_argslot, -swap_bytes), rdx_temp);
845 847 __ addptr(rax_argslot, wordSize);
846 848 __ cmpptr(rax_argslot, rbx_destslot);
847 849 __ jccb(Assembler::belowEqual, loop);
848 850 }
849 851
850 852 // pop the original first chunk into the destination slot, now free
851 853 for (int i = 0; i < swap_bytes; i += wordSize) {
852 854 __ pop(rdx_temp);
853 855 __ movptr(Address(rbx_destslot, i), rdx_temp);
854 856 }
855 857 }
856 858
857 859 __ movptr(rcx_recv, rcx_mh_vmtarget);
858 860 __ jump_to_method_handle_entry(rcx_recv, rdx_temp);
859 861 }
860 862 break;
861 863
862 864 case _adapter_dup_args:
863 865 {
864 866 // 'argslot' is the position of the first argument to duplicate
865 867 __ movl(rax_argslot, rcx_amh_vmargslot);
866 868 __ lea(rax_argslot, __ argument_address(rax_argslot));
867 869
868 870 // 'stack_move' is negative number of words to duplicate
869 871 Register rdx_stack_move = rdx_temp;
870 872 __ movl2ptr(rdx_stack_move, rcx_amh_conversion);
871 873 __ sarptr(rdx_stack_move, CONV_STACK_MOVE_SHIFT);
872 874
873 875 int argslot0_num = 0;
874 876 Address argslot0 = __ argument_address(RegisterOrConstant(argslot0_num));
875 877 assert(argslot0.base() == rsp, "");
876 878 int pre_arg_size = argslot0.disp();
877 879 assert(pre_arg_size % wordSize == 0, "");
878 880 assert(pre_arg_size > 0, "must include PC");
879 881
880 882 // remember the old rsp+1 (argslot[0])
881 883 Register rbx_oldarg = rbx_temp;
882 884 __ lea(rbx_oldarg, argslot0);
883 885
884 886 // move rsp down to make room for dups
885 887 __ lea(rsp, Address(rsp, rdx_stack_move, Address::times_ptr));
886 888
887 889 // compute the new rsp+1 (argslot[0])
888 890 Register rdx_newarg = rdx_temp;
889 891 __ lea(rdx_newarg, argslot0);
890 892
891 893 __ push(rdi); // need a temp
892 894 // (preceding push must be done after arg addresses are taken!)
893 895
894 896 // pull down the pre_arg_size data (PC)
895 897 for (int i = -pre_arg_size; i < 0; i += wordSize) {
896 898 __ movptr(rdi, Address(rbx_oldarg, i));
897 899 __ movptr(Address(rdx_newarg, i), rdi);
898 900 }
899 901
900 902 // copy from rax_argslot[0...] down to new_rsp[1...]
901 903 // pseudo-code:
902 904 // rbx = old_rsp+1
903 905 // rdx = new_rsp+1
904 906 // rax = argslot
905 907 // while (rdx < rbx) *rdx++ = *rax++
906 908 Label loop;
907 909 __ bind(loop);
908 910 __ movptr(rdi, Address(rax_argslot, 0));
909 911 __ movptr(Address(rdx_newarg, 0), rdi);
910 912 __ addptr(rax_argslot, wordSize);
911 913 __ addptr(rdx_newarg, wordSize);
912 914 __ cmpptr(rdx_newarg, rbx_oldarg);
913 915 __ jccb(Assembler::less, loop);
914 916
915 917 __ pop(rdi); // restore temp
916 918
917 919 __ movptr(rcx_recv, rcx_mh_vmtarget);
918 920 __ jump_to_method_handle_entry(rcx_recv, rdx_temp);
919 921 }
920 922 break;
921 923
922 924 case _adapter_drop_args:
923 925 {
924 926 // 'argslot' is the position of the first argument to nuke
925 927 __ movl(rax_argslot, rcx_amh_vmargslot);
926 928 __ lea(rax_argslot, __ argument_address(rax_argslot));
927 929
928 930 __ push(rdi); // need a temp
929 931 // (must do previous push after argslot address is taken)
930 932
931 933 // 'stack_move' is number of words to drop
932 934 Register rdi_stack_move = rdi;
933 935 __ movl2ptr(rdi_stack_move, rcx_amh_conversion);
934 936 __ sarptr(rdi_stack_move, CONV_STACK_MOVE_SHIFT);
935 937 remove_arg_slots(_masm, rdi_stack_move,
936 938 rax_argslot, rbx_temp, rdx_temp);
937 939
938 940 __ pop(rdi); // restore temp
939 941
940 942 __ movptr(rcx_recv, rcx_mh_vmtarget);
941 943 __ jump_to_method_handle_entry(rcx_recv, rdx_temp);
942 944 }
943 945 break;
944 946
945 947 case _adapter_collect_args:
946 948 __ unimplemented(entry_name(ek)); // %%% FIXME: NYI
947 949 break;
948 950
949 951 case _adapter_spread_args:
950 952 // handled completely by optimized cases
951 953 __ stop("init_AdapterMethodHandle should not issue this");
952 954 break;
953 955
954 956 case _adapter_opt_spread_0:
955 957 case _adapter_opt_spread_1:
956 958 case _adapter_opt_spread_more:
957 959 {
958 960 // spread an array out into a group of arguments
959 961 int length_constant = get_ek_adapter_opt_spread_info(ek);
960 962
961 963 // find the address of the array argument
962 964 __ movl(rax_argslot, rcx_amh_vmargslot);
963 965 __ lea(rax_argslot, __ argument_address(rax_argslot));
964 966
965 967 // grab some temps
966 968 { __ push(rsi); __ push(rdi); }
967 969 // (preceding pushes must be done after argslot address is taken!)
968 970 #define UNPUSH_RSI_RDI \
969 971 { __ pop(rdi); __ pop(rsi); }
970 972
971 973 // rax_argslot points both to the array and to the first output arg
972 974 vmarg = Address(rax_argslot, 0);
973 975
974 976 // Get the array value.
975 977 Register rsi_array = rsi;
976 978 Register rdx_array_klass = rdx_temp;
977 979 BasicType elem_type = T_OBJECT;
978 980 int length_offset = arrayOopDesc::length_offset_in_bytes();
979 981 int elem0_offset = arrayOopDesc::base_offset_in_bytes(elem_type);
980 982 __ movptr(rsi_array, vmarg);
981 983 Label skip_array_check;
982 984 if (length_constant == 0) {
983 985 __ testptr(rsi_array, rsi_array);
984 986 __ jcc(Assembler::zero, skip_array_check);
985 987 }
986 988 __ null_check(rsi_array, oopDesc::klass_offset_in_bytes());
987 989 __ load_klass(rdx_array_klass, rsi_array);
988 990
989 991 // Check the array type.
990 992 Register rbx_klass = rbx_temp;
991 993 __ movptr(rbx_klass, rcx_amh_argument); // this is a Class object!
992 994 __ movptr(rbx_klass, Address(rbx_klass, java_lang_Class::klass_offset_in_bytes()));
993 995
994 996 Label ok_array_klass, bad_array_klass, bad_array_length;
995 997 __ check_klass_subtype(rdx_array_klass, rbx_klass, rdi, ok_array_klass);
996 998 // If we get here, the type check failed!
997 999 __ jmp(bad_array_klass);
998 1000 __ bind(ok_array_klass);
999 1001
1000 1002 // Check length.
1001 1003 if (length_constant >= 0) {
1002 1004 __ cmpl(Address(rsi_array, length_offset), length_constant);
1003 1005 } else {
1004 1006 Register rbx_vminfo = rbx_temp;
1005 1007 __ movl(rbx_vminfo, rcx_amh_conversion);
1006 1008 assert(CONV_VMINFO_SHIFT == 0, "preshifted");
1007 1009 __ andl(rbx_vminfo, CONV_VMINFO_MASK);
1008 1010 __ cmpl(rbx_vminfo, Address(rsi_array, length_offset));
1009 1011 }
1010 1012 __ jcc(Assembler::notEqual, bad_array_length);
1011 1013
1012 1014 Register rdx_argslot_limit = rdx_temp;
1013 1015
1014 1016 // Array length checks out. Now insert any required stack slots.
1015 1017 if (length_constant == -1) {
1016 1018 // Form a pointer to the end of the affected region.
1017 1019 __ lea(rdx_argslot_limit, Address(rax_argslot, Interpreter::stackElementSize()));
1018 1020 // 'stack_move' is negative number of words to insert
1019 1021 Register rdi_stack_move = rdi;
1020 1022 __ movl2ptr(rdi_stack_move, rcx_amh_conversion);
1021 1023 __ sarptr(rdi_stack_move, CONV_STACK_MOVE_SHIFT);
1022 1024 Register rsi_temp = rsi_array; // spill this
1023 1025 insert_arg_slots(_masm, rdi_stack_move, -1,
1024 1026 rax_argslot, rbx_temp, rsi_temp);
1025 1027 // reload the array (since rsi was killed)
1026 1028 __ movptr(rsi_array, vmarg);
1027 1029 } else if (length_constant > 1) {
1028 1030 int arg_mask = 0;
1029 1031 int new_slots = (length_constant - 1);
1030 1032 for (int i = 0; i < new_slots; i++) {
1031 1033 arg_mask <<= 1;
1032 1034 arg_mask |= _INSERT_REF_MASK;
1033 1035 }
1034 1036 insert_arg_slots(_masm, new_slots * stack_move_unit(), arg_mask,
1035 1037 rax_argslot, rbx_temp, rdx_temp);
1036 1038 } else if (length_constant == 1) {
1037 1039 // no stack resizing required
1038 1040 } else if (length_constant == 0) {
1039 1041 remove_arg_slots(_masm, -stack_move_unit(),
1040 1042 rax_argslot, rbx_temp, rdx_temp);
1041 1043 }
1042 1044
1043 1045 // Copy from the array to the new slots.
1044 1046 // Note: Stack change code preserves integrity of rax_argslot pointer.
1045 1047 // So even after slot insertions, rax_argslot still points to first argument.
1046 1048 if (length_constant == -1) {
1047 1049 // [rax_argslot, rdx_argslot_limit) is the area we are inserting into.
1048 1050 Register rsi_source = rsi_array;
1049 1051 __ lea(rsi_source, Address(rsi_array, elem0_offset));
1050 1052 Label loop;
1051 1053 __ bind(loop);
1052 1054 __ movptr(rbx_temp, Address(rsi_source, 0));
1053 1055 __ movptr(Address(rax_argslot, 0), rbx_temp);
1054 1056 __ addptr(rsi_source, type2aelembytes(elem_type));
1055 1057 __ addptr(rax_argslot, Interpreter::stackElementSize());
1056 1058 __ cmpptr(rax_argslot, rdx_argslot_limit);
1057 1059 __ jccb(Assembler::less, loop);
1058 1060 } else if (length_constant == 0) {
1059 1061 __ bind(skip_array_check);
1060 1062 // nothing to copy
1061 1063 } else {
1062 1064 int elem_offset = elem0_offset;
1063 1065 int slot_offset = 0;
1064 1066 for (int index = 0; index < length_constant; index++) {
1065 1067 __ movptr(rbx_temp, Address(rsi_array, elem_offset));
1066 1068 __ movptr(Address(rax_argslot, slot_offset), rbx_temp);
1067 1069 elem_offset += type2aelembytes(elem_type);
1068 1070 slot_offset += Interpreter::stackElementSize();
1069 1071 }
1070 1072 }
1071 1073
1072 1074 // Arguments are spread. Move to next method handle.
1073 1075 UNPUSH_RSI_RDI;
1074 1076 __ movptr(rcx_recv, rcx_mh_vmtarget);
1075 1077 __ jump_to_method_handle_entry(rcx_recv, rdx_temp);
1076 1078
1077 1079 __ bind(bad_array_klass);
1078 1080 UNPUSH_RSI_RDI;
1079 1081 __ pushptr(Address(rdx_array_klass, java_mirror_offset)); // required type
1080 1082 __ pushptr(vmarg); // bad array
1081 1083 __ push((int)Bytecodes::_aaload); // who is complaining?
1082 1084 __ jump(ExternalAddress(from_interpreted_entry(_raise_exception)));
1083 1085
1084 1086 __ bind(bad_array_length);
1085 1087 UNPUSH_RSI_RDI;
1086 1088 __ push(rcx_recv); // AMH requiring a certain length
1087 1089 __ pushptr(vmarg); // bad array
1088 1090 __ push((int)Bytecodes::_arraylength); // who is complaining?
1089 1091 __ jump(ExternalAddress(from_interpreted_entry(_raise_exception)));
1090 1092
1091 1093 #undef UNPUSH_RSI_RDI
1092 1094 }
1093 1095 break;
1094 1096
1095 1097 case _adapter_flyby:
1096 1098 case _adapter_ricochet:
1097 1099 __ unimplemented(entry_name(ek)); // %%% FIXME: NYI
1098 1100 break;
1099 1101
1100 1102 default: ShouldNotReachHere();
1101 1103 }
1102 1104 __ hlt();
1103 1105
1104 1106 address me_cookie = MethodHandleEntry::start_compiled_entry(_masm, interp_entry);
1105 1107 __ unimplemented(entry_name(ek)); // %%% FIXME: NYI
1106 1108
1107 1109 init_entry(ek, MethodHandleEntry::finish_compiled_entry(_masm, me_cookie));
1108 1110 }