--- old/src/cpu/x86/vm/stubGenerator_x86_32.cpp
+++ new/src/cpu/x86/vm/stubGenerator_x86_32.cpp
1 1 /*
2 2 * Copyright 1999-2010 Sun Microsystems, Inc. All Rights Reserved.
3 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 4 *
5 5 * This code is free software; you can redistribute it and/or modify it
6 6 * under the terms of the GNU General Public License version 2 only, as
7 7 * published by the Free Software Foundation.
8 8 *
9 9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 12 * version 2 for more details (a copy is included in the LICENSE file that
13 13 * accompanied this code).
14 14 *
15 15 * You should have received a copy of the GNU General Public License version
16 16 * 2 along with this work; if not, write to the Free Software Foundation,
17 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 18 *
19 19 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
20 20 * CA 95054 USA or visit www.sun.com if you need additional information or
21 21 * have any questions.
22 22 *
23 23 */
24 24
25 25 #include "incls/_precompiled.incl"
26 26 #include "incls/_stubGenerator_x86_32.cpp.incl"
27 27
28 28 // Declaration and definition of StubGenerator (no .hpp file).
29 29 // For a more detailed description of the stub routine structure
30 30 // see the comment in stubRoutines.hpp
31 31
32 32 #define __ _masm->
33 33 #define a__ ((Assembler*)_masm)->
34 34
35 35 #ifdef PRODUCT
36 36 #define BLOCK_COMMENT(str) /* nothing */
37 37 #else
38 38 #define BLOCK_COMMENT(str) __ block_comment(str)
39 39 #endif
40 40
41 41 #define BIND(label) bind(label); BLOCK_COMMENT(#label ":")
42 42
43 43 const int MXCSR_MASK = 0xFFC0; // Mask out any pending exceptions
44 44 const int FPU_CNTRL_WRD_MASK = 0xFFFF;
45 45
46 46 // -------------------------------------------------------------------------------------------------------------------------
47 47 // Stub Code definitions
48 48
49 49 static address handle_unsafe_access() {
50 50 JavaThread* thread = JavaThread::current();
51 51 address pc = thread->saved_exception_pc();
52 52 // pc is the instruction which we must emulate
53 53 // doing a no-op is fine: return garbage from the load
54 54 // therefore, compute npc
55 55 address npc = Assembler::locate_next_instruction(pc);
56 56
57 57 // request an async exception
58 58 thread->set_pending_unsafe_access_error();
59 59
60 60 // return address of next instruction to execute
61 61 return npc;
62 62 }
63 63
64 64 class StubGenerator: public StubCodeGenerator {
65 65 private:
66 66
67 67 #ifdef PRODUCT
68 68 #define inc_counter_np(counter) (0)
69 69 #else
70 70 void inc_counter_np_(int& counter) {
71 71 __ incrementl(ExternalAddress((address)&counter));
72 72 }
73 73 #define inc_counter_np(counter) \
74 74 BLOCK_COMMENT("inc_counter " #counter); \
75 75 inc_counter_np_(counter);
76 76 #endif //PRODUCT
77 77
78 78 void inc_copy_counter_np(BasicType t) {
79 79 #ifndef PRODUCT
80 80 switch (t) {
81 81 case T_BYTE: inc_counter_np(SharedRuntime::_jbyte_array_copy_ctr); return;
82 82 case T_SHORT: inc_counter_np(SharedRuntime::_jshort_array_copy_ctr); return;
83 83 case T_INT: inc_counter_np(SharedRuntime::_jint_array_copy_ctr); return;
84 84 case T_LONG: inc_counter_np(SharedRuntime::_jlong_array_copy_ctr); return;
85 85 case T_OBJECT: inc_counter_np(SharedRuntime::_oop_array_copy_ctr); return;
86 86 }
87 87 ShouldNotReachHere();
88 88 #endif //PRODUCT
89 89 }
90 90
91 91 //------------------------------------------------------------------------------------------------------------------------
92 92 // Call stubs are used to call Java from C
93 93 //
94 94 // [ return_from_Java ] <--- rsp
95 95 // [ argument word n ]
96 96 // ...
97 97 // -N [ argument word 1 ]
98 98 // -7 [ Possible padding for stack alignment ]
99 99 // -6 [ Possible padding for stack alignment ]
100 100 // -5 [ Possible padding for stack alignment ]
101 101 // -4 [ mxcsr save ] <--- rsp_after_call
102 102 // -3 [ saved rbx, ]
103 103 // -2 [ saved rsi ]
104 104 // -1 [ saved rdi ]
105 105 // 0 [ saved rbp, ] <--- rbp,
106 106 // 1 [ return address ]
107 107 // 2 [ ptr. to call wrapper ]
108 108 // 3 [ result ]
109 109 // 4 [ result_type ]
110 110 // 5 [ method ]
111 111 // 6 [ entry_point ]
112 112 // 7 [ parameters ]
113 113 // 8 [ parameter_size ]
114 114 // 9 [ thread ]
115 115
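  // For reference, C++ code reaches this stub through a function-pointer
  // type along these lines (a sketch of the CallStub typedef kept in
  // stubRoutines.hpp; its arguments populate frame slots 2..9 above):
  typedef void (*CallStub_sketch)(
    address        link,               // 2: ptr. to call wrapper
    intptr_t*      result,             // 3: result
    BasicType      result_type,        // 4: result_type
    methodOopDesc* method,             // 5: method
    address        entry_point,        // 6: entry_point
    intptr_t*      parameters,         // 7: parameters
    int            size_of_parameters, // 8: parameter_size
    TRAPS                              // 9: thread
  );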
116 116
117 117 address generate_call_stub(address& return_address) {
118 118 StubCodeMark mark(this, "StubRoutines", "call_stub");
119 119 address start = __ pc();
120 120
121 121 // stub code parameters / addresses
122 122 assert(frame::entry_frame_call_wrapper_offset == 2, "adjust this code");
123 123 bool sse_save = false;
124 124 const Address rsp_after_call(rbp, -4 * wordSize); // same as in generate_catch_exception()!
125 125 const int locals_count_in_bytes (4*wordSize);
126 126 const Address mxcsr_save (rbp, -4 * wordSize);
127 127 const Address saved_rbx (rbp, -3 * wordSize);
128 128 const Address saved_rsi (rbp, -2 * wordSize);
129 129 const Address saved_rdi (rbp, -1 * wordSize);
130 130 const Address result (rbp, 3 * wordSize);
131 131 const Address result_type (rbp, 4 * wordSize);
132 132 const Address method (rbp, 5 * wordSize);
133 133 const Address entry_point (rbp, 6 * wordSize);
134 134 const Address parameters (rbp, 7 * wordSize);
135 135 const Address parameter_size(rbp, 8 * wordSize);
136 136 const Address thread (rbp, 9 * wordSize); // same as in generate_catch_exception()!
137 137 sse_save = UseSSE > 0;
138 138
139 139 // stub code
140 140 __ enter();
141 141 __ movptr(rcx, parameter_size); // parameter counter
142 142 __ shlptr(rcx, Interpreter::logStackElementSize()); // convert parameter count to bytes
143 143 __ addptr(rcx, locals_count_in_bytes); // reserve space for register saves
144 144 __ subptr(rsp, rcx);
145 145 __ andptr(rsp, -(StackAlignmentInBytes)); // Align stack
146 146
147 147 // save rdi, rsi, & rbx, according to C calling conventions
148 148 __ movptr(saved_rdi, rdi);
149 149 __ movptr(saved_rsi, rsi);
150 150 __ movptr(saved_rbx, rbx);
151 151 // save and initialize %mxcsr
152 152 if (sse_save) {
153 153 Label skip_ldmx;
154 154 __ stmxcsr(mxcsr_save);
155 155 __ movl(rax, mxcsr_save);
156 156 __ andl(rax, MXCSR_MASK); // Only check control and mask bits
157 157 ExternalAddress mxcsr_std(StubRoutines::addr_mxcsr_std());
158 158 __ cmp32(rax, mxcsr_std);
159 159 __ jcc(Assembler::equal, skip_ldmx);
160 160 __ ldmxcsr(mxcsr_std);
161 161 __ bind(skip_ldmx);
162 162 }
163 163
164 164 // make sure the control word is correct.
165 165 __ fldcw(ExternalAddress(StubRoutines::addr_fpu_cntrl_wrd_std()));
166 166
167 167 #ifdef ASSERT
168 168 // make sure we have no pending exceptions
169 169 { Label L;
170 170 __ movptr(rcx, thread);
171 171 __ cmpptr(Address(rcx, Thread::pending_exception_offset()), (int32_t)NULL_WORD);
172 172 __ jcc(Assembler::equal, L);
173 173 __ stop("StubRoutines::call_stub: entered with pending exception");
174 174 __ bind(L);
175 175 }
176 176 #endif
177 177
178 178 // pass parameters if any
179 179 BLOCK_COMMENT("pass parameters if any");
180 180 Label parameters_done;
181 181 __ movl(rcx, parameter_size); // parameter counter
182 182 __ testl(rcx, rcx);
183 183 __ jcc(Assembler::zero, parameters_done);
184 184
185 185 // parameter passing loop
186 186
187 187 Label loop;
188 188 // Copy Java parameters in reverse order (receiver last)
189 189 // Note that the argument order is inverted in the process
190 190 // source is rdx[rcx: N-1..0]
191 191 // dest is rsp[rbx: 0..N-1]
192 192
193 193 __ movptr(rdx, parameters); // parameter pointer
194 194 __ xorptr(rbx, rbx);
195 195
196 196 __ BIND(loop);
197 197 if (TaggedStackInterpreter) {
198 198 __ movptr(rax, Address(rdx, rcx, Interpreter::stackElementScale(),
199 199 -2*wordSize)); // get tag
200 200 __ movptr(Address(rsp, rbx, Interpreter::stackElementScale(),
201 201 Interpreter::expr_tag_offset_in_bytes(0)), rax); // store tag
202 202 }
203 203
204 204 // get parameter
205 205 __ movptr(rax, Address(rdx, rcx, Interpreter::stackElementScale(), -wordSize));
206 206 __ movptr(Address(rsp, rbx, Interpreter::stackElementScale(),
207 207 Interpreter::expr_offset_in_bytes(0)), rax); // store parameter
208 208 __ increment(rbx);
209 209 __ decrement(rcx);
210 210 __ jcc(Assembler::notZero, loop);
211 211
212 212 // call Java function
213 213 __ BIND(parameters_done);
214 214 __ movptr(rbx, method); // get methodOop
215 215 __ movptr(rax, entry_point); // get entry_point
216 216 __ mov(rsi, rsp); // set sender sp
217 217 BLOCK_COMMENT("call Java function");
218 218 __ call(rax);
219 219
220 220 BLOCK_COMMENT("call_stub_return_address:");
221 221 return_address = __ pc();
222 222
223 223 Label common_return;
224 224
225 225 __ BIND(common_return);
226 226
227 227 // store result depending on type
228 228 // (everything that is not T_LONG, T_FLOAT or T_DOUBLE is treated as T_INT)
229 229 __ movptr(rdi, result);
230 230 Label is_long, is_float, is_double, exit;
231 231 __ movl(rsi, result_type);
232 232 __ cmpl(rsi, T_LONG);
233 233 __ jcc(Assembler::equal, is_long);
234 234 __ cmpl(rsi, T_FLOAT);
235 235 __ jcc(Assembler::equal, is_float);
236 236 __ cmpl(rsi, T_DOUBLE);
237 237 __ jcc(Assembler::equal, is_double);
238 238
239 239 // handle T_INT case
240 240 __ movl(Address(rdi, 0), rax);
241 241 __ BIND(exit);
242 242
243 243 // check that FPU stack is empty
244 244 __ verify_FPU(0, "generate_call_stub");
245 245
246 246 // pop parameters
247 247 __ lea(rsp, rsp_after_call);
248 248
249 249 // restore %mxcsr
250 250 if (sse_save) {
251 251 __ ldmxcsr(mxcsr_save);
252 252 }
253 253
254 254 // restore rdi, rsi and rbx,
255 255 __ movptr(rbx, saved_rbx);
256 256 __ movptr(rsi, saved_rsi);
257 257 __ movptr(rdi, saved_rdi);
258 258 __ addptr(rsp, 4*wordSize);
259 259
260 260 // return
261 261 __ pop(rbp);
262 262 __ ret(0);
263 263
264 264 // handle return types different from T_INT
265 265 __ BIND(is_long);
266 266 __ movl(Address(rdi, 0 * wordSize), rax);
267 267 __ movl(Address(rdi, 1 * wordSize), rdx);
268 268 __ jmp(exit);
269 269
270 270 __ BIND(is_float);
271 271 // interpreter uses xmm0 for return values
272 272 if (UseSSE >= 1) {
273 273 __ movflt(Address(rdi, 0), xmm0);
274 274 } else {
275 275 __ fstp_s(Address(rdi, 0));
276 276 }
277 277 __ jmp(exit);
278 278
279 279 __ BIND(is_double);
280 280 // interpreter uses xmm0 for return values
281 281 if (UseSSE >= 2) {
282 282 __ movdbl(Address(rdi, 0), xmm0);
283 283 } else {
284 284 __ fstp_d(Address(rdi, 0));
285 285 }
286 286 __ jmp(exit);
287 287
 288  288     // If we call compiled code directly from the call stub we will
 289  289     // need to redirect its return back to the call stub, to a specialized
 290  290     // piece of code that can handle compiled results and clean up the FPU
 291  291     // stack. Compiled code will be set to return here instead of the
 292  292     // return above that handles interpreter returns.
293 293
294 294 BLOCK_COMMENT("call_stub_compiled_return:");
295 295 StubRoutines::x86::set_call_stub_compiled_return( __ pc());
296 296
297 297 #ifdef COMPILER2
298 298 if (UseSSE >= 2) {
299 299 __ verify_FPU(0, "call_stub_compiled_return");
300 300 } else {
301 301 for (int i = 1; i < 8; i++) {
302 302 __ ffree(i);
303 303 }
304 304
305 305 // UseSSE <= 1 so double result should be left on TOS
306 306 __ movl(rsi, result_type);
307 307 __ cmpl(rsi, T_DOUBLE);
308 308 __ jcc(Assembler::equal, common_return);
309 309 if (UseSSE == 0) {
310 310 // UseSSE == 0 so float result should be left on TOS
311 311 __ cmpl(rsi, T_FLOAT);
312 312 __ jcc(Assembler::equal, common_return);
313 313 }
314 314 __ ffree(0);
315 315 }
316 316 #endif /* COMPILER2 */
317 317 __ jmp(common_return);
318 318
319 319 return start;
320 320 }
321 321
322 322
323 323 //------------------------------------------------------------------------------------------------------------------------
324 324 // Return point for a Java call if there's an exception thrown in Java code.
325 325 // The exception is caught and transformed into a pending exception stored in
326 326 // JavaThread that can be tested from within the VM.
327 327 //
328 328 // Note: Usually the parameters are removed by the callee. In case of an exception
329 329 // crossing an activation frame boundary, that is not the case if the callee
 330  330   //        is compiled code => need to set up the rsp.
331 331 //
332 332 // rax,: exception oop
333 333
334 334 address generate_catch_exception() {
335 335 StubCodeMark mark(this, "StubRoutines", "catch_exception");
336 336 const Address rsp_after_call(rbp, -4 * wordSize); // same as in generate_call_stub()!
337 337 const Address thread (rbp, 9 * wordSize); // same as in generate_call_stub()!
338 338 address start = __ pc();
339 339
340 340 // get thread directly
341 341 __ movptr(rcx, thread);
342 342 #ifdef ASSERT
343 343 // verify that threads correspond
344 344 { Label L;
345 345 __ get_thread(rbx);
346 346 __ cmpptr(rbx, rcx);
347 347 __ jcc(Assembler::equal, L);
348 348 __ stop("StubRoutines::catch_exception: threads must correspond");
349 349 __ bind(L);
350 350 }
351 351 #endif
352 352 // set pending exception
353 353 __ verify_oop(rax);
354 354 __ movptr(Address(rcx, Thread::pending_exception_offset()), rax );
355 355 __ lea(Address(rcx, Thread::exception_file_offset ()),
356 356 ExternalAddress((address)__FILE__));
357 357 __ movl(Address(rcx, Thread::exception_line_offset ()), __LINE__ );
358 358 // complete return to VM
359 359 assert(StubRoutines::_call_stub_return_address != NULL, "_call_stub_return_address must have been generated before");
360 360 __ jump(RuntimeAddress(StubRoutines::_call_stub_return_address));
361 361
362 362 return start;
363 363 }
364 364
365 365
366 366 //------------------------------------------------------------------------------------------------------------------------
367 367 // Continuation point for runtime calls returning with a pending exception.
368 368 // The pending exception check happened in the runtime or native call stub.
369 369 // The pending exception in Thread is converted into a Java-level exception.
370 370 //
371 371 // Contract with Java-level exception handlers:
372 - // rax,: exception
372 + // rax: exception
373 373 // rdx: throwing pc
374 374 //
 375  375   // NOTE: At entry of this stub, the exception pc must be on the stack!
376 376
377 377 address generate_forward_exception() {
378 378 StubCodeMark mark(this, "StubRoutines", "forward exception");
379 379 address start = __ pc();
380 + const Register thread = rcx;
381 +
382 + // other registers used in this stub
383 + const Register exception_oop = rax;
384 + const Register handler_addr = rbx;
385 + const Register exception_pc = rdx;
380 386
381 387 // Upon entry, the sp points to the return address returning into Java
382 388 // (interpreted or compiled) code; i.e., the return address becomes the
383 389 // throwing pc.
384 390 //
385 391 // Arguments pushed before the runtime call are still on the stack but
386 392 // the exception handler will reset the stack pointer -> ignore them.
387 393 // A potential result in registers can be ignored as well.
388 394
389 395 #ifdef ASSERT
390 396 // make sure this code is only executed if there is a pending exception
391 397 { Label L;
392 - __ get_thread(rcx);
393 - __ cmpptr(Address(rcx, Thread::pending_exception_offset()), (int32_t)NULL_WORD);
398 + __ get_thread(thread);
399 + __ cmpptr(Address(thread, Thread::pending_exception_offset()), (int32_t)NULL_WORD);
394 400 __ jcc(Assembler::notEqual, L);
395 401 __ stop("StubRoutines::forward exception: no pending exception (1)");
396 402 __ bind(L);
397 403 }
398 404 #endif
399 405
400 406 // compute exception handler into rbx,
401 - __ movptr(rax, Address(rsp, 0));
407 + __ get_thread(thread);
408 + __ movptr(exception_pc, Address(rsp, 0));
402 409 BLOCK_COMMENT("call exception_handler_for_return_address");
403 - __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::exception_handler_for_return_address), rax);
404 - __ mov(rbx, rax);
410 + __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::exception_handler_for_return_address), thread, exception_pc);
411 + __ mov(handler_addr, rax);
405 412
406 - // setup rax, & rdx, remove return address & clear pending exception
407 - __ get_thread(rcx);
408 - __ pop(rdx);
409 - __ movptr(rax, Address(rcx, Thread::pending_exception_offset()));
410 - __ movptr(Address(rcx, Thread::pending_exception_offset()), NULL_WORD);
413 + // setup rax & rdx, remove return address & clear pending exception
414 + __ get_thread(thread);
415 + __ pop(exception_pc);
416 + __ movptr(exception_oop, Address(thread, Thread::pending_exception_offset()));
417 + __ movptr(Address(thread, Thread::pending_exception_offset()), NULL_WORD);
411 418
412 419 #ifdef ASSERT
413 420 // make sure exception is set
414 421 { Label L;
415 - __ testptr(rax, rax);
422 + __ testptr(exception_oop, exception_oop);
416 423 __ jcc(Assembler::notEqual, L);
417 424 __ stop("StubRoutines::forward exception: no pending exception (2)");
418 425 __ bind(L);
419 426 }
420 427 #endif
421 428
429 + // Verify that there is really a valid exception in RAX.
430 + __ verify_oop(exception_oop);
431 +
432 + // Restore SP from BP if the exception PC is a MethodHandle call site.
433 + __ cmpl(Address(thread, JavaThread::is_method_handle_exception_offset()), 0);
434 + __ cmovptr(Assembler::notEqual, rsp, rbp);
435 +
422 436 // continue at exception handler (return address removed)
423 - // rax,: exception
424 - // rbx,: exception handler
437 + // rax: exception
438 + // rbx: exception handler
425 439 // rdx: throwing pc
426 - __ verify_oop(rax);
427 - __ jmp(rbx);
440 + __ jmp(handler_addr);
428 441
429 442 return start;
430 443 }
431 444
432 445
433 446 //----------------------------------------------------------------------------------------------------
434 447 // Support for jint Atomic::xchg(jint exchange_value, volatile jint* dest)
435 448 //
436 449 // xchg exists as far back as 8086, lock needed for MP only
437 450 // Stack layout immediately after call:
438 451 //
439 452 // 0 [ret addr ] <--- rsp
440 453 // 1 [ ex ]
441 454 // 2 [ dest ]
442 455 //
443 456 // Result: *dest <- ex, return (old *dest)
444 457 //
445 458 // Note: win32 does not currently use this code
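  // What the stub computes, written out in C++ (a sketch of the contract
  // only -- the single xchgl instruction below performs it atomically):
  static jint atomic_xchg_sketch(jint exchange_value, volatile jint* dest) {
    jint old = *dest;          // fetch previous value
    *dest    = exchange_value; // store new value
    return old;                // hand the previous value back
  }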
446 459
447 460 address generate_atomic_xchg() {
448 461 StubCodeMark mark(this, "StubRoutines", "atomic_xchg");
449 462 address start = __ pc();
450 463
451 464 __ push(rdx);
452 465 Address exchange(rsp, 2 * wordSize);
453 466 Address dest_addr(rsp, 3 * wordSize);
454 467 __ movl(rax, exchange);
455 468 __ movptr(rdx, dest_addr);
456 469 __ xchgl(rax, Address(rdx, 0));
457 470 __ pop(rdx);
458 471 __ ret(0);
459 472
460 473 return start;
461 474 }
462 475
463 476 //----------------------------------------------------------------------------------------------------
464 477 // Support for void verify_mxcsr()
465 478 //
466 479 // This routine is used with -Xcheck:jni to verify that native
467 480 // JNI code does not return to Java code without restoring the
468 481 // MXCSR register to our expected state.
469 482
470 483
471 484 address generate_verify_mxcsr() {
472 485 StubCodeMark mark(this, "StubRoutines", "verify_mxcsr");
473 486 address start = __ pc();
474 487
475 488 const Address mxcsr_save(rsp, 0);
476 489
477 490 if (CheckJNICalls && UseSSE > 0 ) {
478 491 Label ok_ret;
479 492 ExternalAddress mxcsr_std(StubRoutines::addr_mxcsr_std());
480 493 __ push(rax);
481 494 __ subptr(rsp, wordSize); // allocate a temp location
482 495 __ stmxcsr(mxcsr_save);
483 496 __ movl(rax, mxcsr_save);
484 497 __ andl(rax, MXCSR_MASK);
485 498 __ cmp32(rax, mxcsr_std);
486 499 __ jcc(Assembler::equal, ok_ret);
487 500
488 501 __ warn("MXCSR changed by native JNI code.");
489 502
490 503 __ ldmxcsr(mxcsr_std);
491 504
492 505 __ bind(ok_ret);
493 506 __ addptr(rsp, wordSize);
494 507 __ pop(rax);
495 508 }
496 509
497 510 __ ret(0);
498 511
499 512 return start;
500 513 }
501 514
502 515
503 516 //---------------------------------------------------------------------------
504 517 // Support for void verify_fpu_cntrl_wrd()
505 518 //
506 519 // This routine is used with -Xcheck:jni to verify that native
507 520 // JNI code does not return to Java code without restoring the
508 521 // FP control word to our expected state.
509 522
510 523 address generate_verify_fpu_cntrl_wrd() {
511 524 StubCodeMark mark(this, "StubRoutines", "verify_spcw");
512 525 address start = __ pc();
513 526
514 527 const Address fpu_cntrl_wrd_save(rsp, 0);
515 528
516 529 if (CheckJNICalls) {
517 530 Label ok_ret;
518 531 __ push(rax);
519 532 __ subptr(rsp, wordSize); // allocate a temp location
520 533 __ fnstcw(fpu_cntrl_wrd_save);
521 534 __ movl(rax, fpu_cntrl_wrd_save);
522 535 __ andl(rax, FPU_CNTRL_WRD_MASK);
523 536 ExternalAddress fpu_std(StubRoutines::addr_fpu_cntrl_wrd_std());
524 537 __ cmp32(rax, fpu_std);
525 538 __ jcc(Assembler::equal, ok_ret);
526 539
527 540 __ warn("Floating point control word changed by native JNI code.");
528 541
529 542 __ fldcw(fpu_std);
530 543
531 544 __ bind(ok_ret);
532 545 __ addptr(rsp, wordSize);
533 546 __ pop(rax);
534 547 }
535 548
536 549 __ ret(0);
537 550
538 551 return start;
539 552 }
540 553
541 554 //---------------------------------------------------------------------------
542 555 // Wrapper for slow-case handling of double-to-integer conversion
 543  556   // The d2i or f2i fast path failed either because the value is NaN or
 544  557   // because of under/overflow.
545 558 // Input: FPU TOS: float value
546 559 // Output: rax, (rdx): integer (long) result
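  // The C function reached through 'fcn' implements Java's saturating
  // conversion: NaN becomes 0 and out-of-range values clamp. A sketch of
  // SharedRuntime::d2i for the T_INT case (max_jint/min_jint are the usual
  // jint limits):
  static jint d2i_sketch(jdouble x) {
    if (x != x)                   return 0;        // NaN is unequal to itself
    if (x >= (jdouble) max_jint)  return max_jint; // clamp high
    if (x <= (jdouble) min_jint)  return min_jint; // clamp low
    return (jint) x;
  }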
547 560
548 561 address generate_d2i_wrapper(BasicType t, address fcn) {
549 562 StubCodeMark mark(this, "StubRoutines", "d2i_wrapper");
550 563 address start = __ pc();
551 564
552 565 // Capture info about frame layout
553 566 enum layout { FPUState_off = 0,
554 567 rbp_off = FPUStateSizeInWords,
555 568 rdi_off,
556 569 rsi_off,
557 570 rcx_off,
558 571 rbx_off,
559 572 saved_argument_off,
560 573 saved_argument_off2, // 2nd half of double
561 574 framesize
562 575 };
563 576
564 577 assert(FPUStateSizeInWords == 27, "update stack layout");
565 578
566 579 // Save outgoing argument to stack across push_FPU_state()
567 580 __ subptr(rsp, wordSize * 2);
568 581 __ fstp_d(Address(rsp, 0));
569 582
570 583 // Save CPU & FPU state
571 584 __ push(rbx);
572 585 __ push(rcx);
573 586 __ push(rsi);
574 587 __ push(rdi);
575 588 __ push(rbp);
576 589 __ push_FPU_state();
577 590
578 591 // push_FPU_state() resets the FP top of stack
579 592 // Load original double into FP top of stack
580 593 __ fld_d(Address(rsp, saved_argument_off * wordSize));
581 594 // Store double into stack as outgoing argument
582 595 __ subptr(rsp, wordSize*2);
583 596 __ fst_d(Address(rsp, 0));
584 597
585 598 // Prepare FPU for doing math in C-land
586 599 __ empty_FPU_stack();
587 600 // Call the C code to massage the double. Result in EAX
588 601 if (t == T_INT)
589 602 { BLOCK_COMMENT("SharedRuntime::d2i"); }
590 603 else if (t == T_LONG)
591 604 { BLOCK_COMMENT("SharedRuntime::d2l"); }
592 605 __ call_VM_leaf( fcn, 2 );
593 606
594 607 // Restore CPU & FPU state
595 608 __ pop_FPU_state();
596 609 __ pop(rbp);
597 610 __ pop(rdi);
598 611 __ pop(rsi);
599 612 __ pop(rcx);
600 613 __ pop(rbx);
601 614 __ addptr(rsp, wordSize * 2);
602 615
603 616 __ ret(0);
604 617
605 618 return start;
606 619 }
607 620
608 621
609 622 //---------------------------------------------------------------------------
610 623 // The following routine generates a subroutine to throw an asynchronous
611 624 // UnknownError when an unsafe access gets a fault that could not be
612 625 // reasonably prevented by the programmer. (Example: SIGBUS/OBJERR.)
613 626 address generate_handler_for_unsafe_access() {
614 627 StubCodeMark mark(this, "StubRoutines", "handler_for_unsafe_access");
615 628 address start = __ pc();
616 629
617 630 __ push(0); // hole for return address-to-be
618 631 __ pusha(); // push registers
619 632 Address next_pc(rsp, RegisterImpl::number_of_registers * BytesPerWord);
620 633 BLOCK_COMMENT("call handle_unsafe_access");
621 634 __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, handle_unsafe_access)));
622 635 __ movptr(next_pc, rax); // stuff next address
623 636 __ popa();
624 637 __ ret(0); // jump to next address
625 638
626 639 return start;
627 640 }
628 641
629 642
630 643 //----------------------------------------------------------------------------------------------------
631 644 // Non-destructive plausibility checks for oops
632 645
633 646 address generate_verify_oop() {
634 647 StubCodeMark mark(this, "StubRoutines", "verify_oop");
635 648 address start = __ pc();
636 649
637 650 // Incoming arguments on stack after saving rax,:
638 651 //
639 652 // [tos ]: saved rdx
640 653 // [tos + 1]: saved EFLAGS
641 654 // [tos + 2]: return address
642 655 // [tos + 3]: char* error message
643 656 // [tos + 4]: oop object to verify
644 657 // [tos + 5]: saved rax, - saved by caller and bashed
645 658
646 659 Label exit, error;
647 660 __ pushf();
648 661 __ incrementl(ExternalAddress((address) StubRoutines::verify_oop_count_addr()));
649 662 __ push(rdx); // save rdx
650 663 // make sure object is 'reasonable'
651 664 __ movptr(rax, Address(rsp, 4 * wordSize)); // get object
652 665 __ testptr(rax, rax);
653 666 __ jcc(Assembler::zero, exit); // if obj is NULL it is ok
654 667
655 668 // Check if the oop is in the right area of memory
656 669 const int oop_mask = Universe::verify_oop_mask();
657 670 const int oop_bits = Universe::verify_oop_bits();
658 671 __ mov(rdx, rax);
659 672 __ andptr(rdx, oop_mask);
660 673 __ cmpptr(rdx, oop_bits);
661 674 __ jcc(Assembler::notZero, error);
662 675
663 676 // make sure klass is 'reasonable'
664 677 __ movptr(rax, Address(rax, oopDesc::klass_offset_in_bytes())); // get klass
665 678 __ testptr(rax, rax);
666 679 __ jcc(Assembler::zero, error); // if klass is NULL it is broken
667 680
668 681 // Check if the klass is in the right area of memory
669 682 const int klass_mask = Universe::verify_klass_mask();
670 683 const int klass_bits = Universe::verify_klass_bits();
671 684 __ mov(rdx, rax);
672 685 __ andptr(rdx, klass_mask);
673 686 __ cmpptr(rdx, klass_bits);
674 687 __ jcc(Assembler::notZero, error);
675 688
676 689 // make sure klass' klass is 'reasonable'
677 690 __ movptr(rax, Address(rax, oopDesc::klass_offset_in_bytes())); // get klass' klass
678 691 __ testptr(rax, rax);
679 692 __ jcc(Assembler::zero, error); // if klass' klass is NULL it is broken
680 693
681 694 __ mov(rdx, rax);
682 695 __ andptr(rdx, klass_mask);
683 696 __ cmpptr(rdx, klass_bits);
684 697 __ jcc(Assembler::notZero, error); // if klass not in right area
685 698 // of memory it is broken too.
686 699
687 700 // return if everything seems ok
688 701 __ bind(exit);
689 702 __ movptr(rax, Address(rsp, 5 * wordSize)); // get saved rax, back
690 703 __ pop(rdx); // restore rdx
691 704 __ popf(); // restore EFLAGS
692 705 __ ret(3 * wordSize); // pop arguments
693 706
694 707 // handle errors
695 708 __ bind(error);
696 709 __ movptr(rax, Address(rsp, 5 * wordSize)); // get saved rax, back
697 710 __ pop(rdx); // get saved rdx back
698 711 __ popf(); // get saved EFLAGS off stack -- will be ignored
699 712 __ pusha(); // push registers (eip = return address & msg are already pushed)
700 713 BLOCK_COMMENT("call MacroAssembler::debug");
701 714 __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, MacroAssembler::debug32)));
702 715 __ popa();
703 716 __ ret(3 * wordSize); // pop arguments
704 717 return start;
705 718 }
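  // Both 'right area of memory' tests above boil down to one mask compare
  // (a sketch; mask and bits come from Universe::verify_oop_mask()/_bits()
  // and their klass counterparts):
  static bool looks_reasonable_sketch(uintptr_t value, uintptr_t mask, uintptr_t bits) {
    return (value & mask) == bits;   // pointer pattern matches the expected range
  }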
706 719
707 720 //
708 721 // Generate pre-barrier for array stores
709 722 //
710 723 // Input:
711 724 // start - starting address
712 725 // count - element count
713 726 void gen_write_ref_array_pre_barrier(Register start, Register count) {
714 727 assert_different_registers(start, count);
715 728 BarrierSet* bs = Universe::heap()->barrier_set();
716 729 switch (bs->kind()) {
717 730 case BarrierSet::G1SATBCT:
718 731 case BarrierSet::G1SATBCTLogging:
719 732 {
720 733 __ pusha(); // push registers
721 734 __ call_VM_leaf(CAST_FROM_FN_PTR(address, BarrierSet::static_write_ref_array_pre),
722 735 start, count);
723 736 __ popa();
724 737 }
725 738 break;
726 739 case BarrierSet::CardTableModRef:
727 740 case BarrierSet::CardTableExtension:
728 741 case BarrierSet::ModRef:
729 742 break;
730 743 default :
731 744 ShouldNotReachHere();
732 745
733 746 }
734 747 }
735 748
736 749
737 750 //
738 751 // Generate a post-barrier for an array store
739 752 //
740 753 // start - starting address
741 754 // count - element count
742 755 //
743 756 // The two input registers are overwritten.
744 757 //
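  // For the card-table kinds, the loop below is equivalent to dirtying every
  // card spanned by the updated range (a sketch; in this codebase cards are
  // 512 bytes, i.e. CardTableModRefBS::card_shift == 9, and 0 means dirty):
  static void dirty_cards_sketch(volatile jbyte* byte_map_base,
                                 uintptr_t first_elem, uintptr_t last_elem) {
    for (uintptr_t c = (first_elem >> 9); c <= (last_elem >> 9); c++) {
      byte_map_base[c] = 0;          // mark card dirty
    }
  }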
745 758 void gen_write_ref_array_post_barrier(Register start, Register count) {
746 759 BarrierSet* bs = Universe::heap()->barrier_set();
747 760 assert_different_registers(start, count);
748 761 switch (bs->kind()) {
749 762 case BarrierSet::G1SATBCT:
750 763 case BarrierSet::G1SATBCTLogging:
751 764 {
752 765 __ pusha(); // push registers
753 766 __ call_VM_leaf(CAST_FROM_FN_PTR(address, BarrierSet::static_write_ref_array_post),
754 767 start, count);
755 768 __ popa();
756 769 }
757 770 break;
758 771
759 772 case BarrierSet::CardTableModRef:
760 773 case BarrierSet::CardTableExtension:
761 774 {
762 775 CardTableModRefBS* ct = (CardTableModRefBS*)bs;
763 776 assert(sizeof(*ct->byte_map_base) == sizeof(jbyte), "adjust this code");
764 777
765 778 Label L_loop;
766 779 const Register end = count; // elements count; end == start+count-1
767 780 assert_different_registers(start, end);
768 781
769 782 __ lea(end, Address(start, count, Address::times_ptr, -wordSize));
770 783 __ shrptr(start, CardTableModRefBS::card_shift);
771 784 __ shrptr(end, CardTableModRefBS::card_shift);
772 785 __ subptr(end, start); // end --> count
773 786 __ BIND(L_loop);
774 787 intptr_t disp = (intptr_t) ct->byte_map_base;
775 788 Address cardtable(start, count, Address::times_1, disp);
776 789 __ movb(cardtable, 0);
777 790 __ decrement(count);
778 791 __ jcc(Assembler::greaterEqual, L_loop);
779 792 }
780 793 break;
781 794 case BarrierSet::ModRef:
782 795 break;
783 796 default :
784 797 ShouldNotReachHere();
785 798
786 799 }
787 800 }
788 801
789 802
 790  803   // Copy 64-byte chunks
791 804 //
792 805 // Inputs:
793 806 // from - source array address
794 807 // to_from - destination array address - from
 795  808   //   qword_count - 8-byte element count, positive (the loop counts it down)
796 809 //
797 810 void xmm_copy_forward(Register from, Register to_from, Register qword_count) {
798 811 assert( UseSSE >= 2, "supported cpu only" );
799 812 Label L_copy_64_bytes_loop, L_copy_64_bytes, L_copy_8_bytes, L_exit;
800 813 // Copy 64-byte chunks
801 814 __ jmpb(L_copy_64_bytes);
802 815 __ align(16);
803 816 __ BIND(L_copy_64_bytes_loop);
804 817
 805  818     if (UseUnalignedLoadStores) {
806 819 __ movdqu(xmm0, Address(from, 0));
807 820 __ movdqu(Address(from, to_from, Address::times_1, 0), xmm0);
808 821 __ movdqu(xmm1, Address(from, 16));
809 822 __ movdqu(Address(from, to_from, Address::times_1, 16), xmm1);
810 823 __ movdqu(xmm2, Address(from, 32));
811 824 __ movdqu(Address(from, to_from, Address::times_1, 32), xmm2);
812 825 __ movdqu(xmm3, Address(from, 48));
813 826 __ movdqu(Address(from, to_from, Address::times_1, 48), xmm3);
814 827
815 828 } else {
816 829 __ movq(xmm0, Address(from, 0));
817 830 __ movq(Address(from, to_from, Address::times_1, 0), xmm0);
818 831 __ movq(xmm1, Address(from, 8));
819 832 __ movq(Address(from, to_from, Address::times_1, 8), xmm1);
820 833 __ movq(xmm2, Address(from, 16));
821 834 __ movq(Address(from, to_from, Address::times_1, 16), xmm2);
822 835 __ movq(xmm3, Address(from, 24));
823 836 __ movq(Address(from, to_from, Address::times_1, 24), xmm3);
824 837 __ movq(xmm4, Address(from, 32));
825 838 __ movq(Address(from, to_from, Address::times_1, 32), xmm4);
826 839 __ movq(xmm5, Address(from, 40));
827 840 __ movq(Address(from, to_from, Address::times_1, 40), xmm5);
828 841 __ movq(xmm6, Address(from, 48));
829 842 __ movq(Address(from, to_from, Address::times_1, 48), xmm6);
830 843 __ movq(xmm7, Address(from, 56));
831 844 __ movq(Address(from, to_from, Address::times_1, 56), xmm7);
832 845 }
833 846
834 847 __ addl(from, 64);
835 848 __ BIND(L_copy_64_bytes);
836 849 __ subl(qword_count, 8);
837 850 __ jcc(Assembler::greaterEqual, L_copy_64_bytes_loop);
838 851 __ addl(qword_count, 8);
839 852 __ jccb(Assembler::zero, L_exit);
840 853 //
841 854 // length is too short, just copy qwords
842 855 //
843 856 __ BIND(L_copy_8_bytes);
844 857 __ movq(xmm0, Address(from, 0));
845 858 __ movq(Address(from, to_from, Address::times_1), xmm0);
846 859 __ addl(from, 8);
847 860 __ decrement(qword_count);
848 861 __ jcc(Assembler::greater, L_copy_8_bytes);
849 862 __ BIND(L_exit);
850 863 }
851 864
 852  865   // Copy 64-byte chunks
853 866 //
854 867 // Inputs:
855 868 // from - source array address
856 869 // to_from - destination array address - from
 857  870   //   qword_count - 8-byte element count, positive (the loop counts it down)
858 871 //
859 872 void mmx_copy_forward(Register from, Register to_from, Register qword_count) {
860 873 assert( VM_Version::supports_mmx(), "supported cpu only" );
861 874 Label L_copy_64_bytes_loop, L_copy_64_bytes, L_copy_8_bytes, L_exit;
862 875 // Copy 64-byte chunks
863 876 __ jmpb(L_copy_64_bytes);
864 877 __ align(16);
865 878 __ BIND(L_copy_64_bytes_loop);
866 879 __ movq(mmx0, Address(from, 0));
867 880 __ movq(mmx1, Address(from, 8));
868 881 __ movq(mmx2, Address(from, 16));
869 882 __ movq(Address(from, to_from, Address::times_1, 0), mmx0);
870 883 __ movq(mmx3, Address(from, 24));
871 884 __ movq(Address(from, to_from, Address::times_1, 8), mmx1);
872 885 __ movq(mmx4, Address(from, 32));
873 886 __ movq(Address(from, to_from, Address::times_1, 16), mmx2);
874 887 __ movq(mmx5, Address(from, 40));
875 888 __ movq(Address(from, to_from, Address::times_1, 24), mmx3);
876 889 __ movq(mmx6, Address(from, 48));
877 890 __ movq(Address(from, to_from, Address::times_1, 32), mmx4);
878 891 __ movq(mmx7, Address(from, 56));
879 892 __ movq(Address(from, to_from, Address::times_1, 40), mmx5);
880 893 __ movq(Address(from, to_from, Address::times_1, 48), mmx6);
881 894 __ movq(Address(from, to_from, Address::times_1, 56), mmx7);
882 895 __ addptr(from, 64);
883 896 __ BIND(L_copy_64_bytes);
884 897 __ subl(qword_count, 8);
885 898 __ jcc(Assembler::greaterEqual, L_copy_64_bytes_loop);
886 899 __ addl(qword_count, 8);
887 900 __ jccb(Assembler::zero, L_exit);
888 901 //
889 902 // length is too short, just copy qwords
890 903 //
891 904 __ BIND(L_copy_8_bytes);
892 905 __ movq(mmx0, Address(from, 0));
893 906 __ movq(Address(from, to_from, Address::times_1), mmx0);
894 907 __ addptr(from, 8);
895 908 __ decrement(qword_count);
896 909 __ jcc(Assembler::greater, L_copy_8_bytes);
897 910 __ BIND(L_exit);
898 911 __ emms();
899 912 }
900 913
901 914 address generate_disjoint_copy(BasicType t, bool aligned,
902 915 Address::ScaleFactor sf,
903 916 address* entry, const char *name) {
904 917 __ align(CodeEntryAlignment);
905 918 StubCodeMark mark(this, "StubRoutines", name);
906 919 address start = __ pc();
907 920
908 921 Label L_0_count, L_exit, L_skip_align1, L_skip_align2, L_copy_byte;
909 922 Label L_copy_2_bytes, L_copy_4_bytes, L_copy_64_bytes;
910 923
911 924 int shift = Address::times_ptr - sf;
912 925
913 926 const Register from = rsi; // source array address
914 927 const Register to = rdi; // destination array address
915 928 const Register count = rcx; // elements count
916 929 const Register to_from = to; // (to - from)
917 930 const Register saved_to = rdx; // saved destination array address
918 931
919 932 __ enter(); // required for proper stackwalking of RuntimeStub frame
920 933 __ push(rsi);
921 934 __ push(rdi);
922 935 __ movptr(from , Address(rsp, 12+ 4));
923 936 __ movptr(to , Address(rsp, 12+ 8));
924 937 __ movl(count, Address(rsp, 12+ 12));
925 938 if (t == T_OBJECT) {
926 939 __ testl(count, count);
927 940 __ jcc(Assembler::zero, L_0_count);
928 941 gen_write_ref_array_pre_barrier(to, count);
929 942 __ mov(saved_to, to); // save 'to'
930 943 }
931 944
932 945 *entry = __ pc(); // Entry point from conjoint arraycopy stub.
933 946 BLOCK_COMMENT("Entry:");
934 947
935 948 __ subptr(to, from); // to --> to_from
936 949 __ cmpl(count, 2<<shift); // Short arrays (< 8 bytes) copy by element
937 950 __ jcc(Assembler::below, L_copy_4_bytes); // use unsigned cmp
938 951 if (!UseUnalignedLoadStores && !aligned && (t == T_BYTE || t == T_SHORT)) {
 939  952       // align source address at a 4-byte address boundary
940 953 if (t == T_BYTE) {
 941  954         // One-byte misalignment happens only for byte arrays
942 955 __ testl(from, 1);
943 956 __ jccb(Assembler::zero, L_skip_align1);
944 957 __ movb(rax, Address(from, 0));
945 958 __ movb(Address(from, to_from, Address::times_1, 0), rax);
946 959 __ increment(from);
947 960 __ decrement(count);
948 961 __ BIND(L_skip_align1);
949 962 }
 950  963       // Two-byte misalignment happens only for byte and short (char) arrays
951 964 __ testl(from, 2);
952 965 __ jccb(Assembler::zero, L_skip_align2);
953 966 __ movw(rax, Address(from, 0));
954 967 __ movw(Address(from, to_from, Address::times_1, 0), rax);
955 968 __ addptr(from, 2);
956 969 __ subl(count, 1<<(shift-1));
957 970 __ BIND(L_skip_align2);
958 971 }
959 972 if (!VM_Version::supports_mmx()) {
960 973 __ mov(rax, count); // save 'count'
961 974 __ shrl(count, shift); // bytes count
962 975 __ addptr(to_from, from);// restore 'to'
963 976 __ rep_mov();
964 977 __ subptr(to_from, from);// restore 'to_from'
965 978 __ mov(count, rax); // restore 'count'
966 979 __ jmpb(L_copy_2_bytes); // all dwords were copied
967 980 } else {
968 981 if (!UseUnalignedLoadStores) {
 969  982         // align to 8 bytes; we know we are 4-byte aligned to start
970 983 __ testptr(from, 4);
971 984 __ jccb(Assembler::zero, L_copy_64_bytes);
972 985 __ movl(rax, Address(from, 0));
973 986 __ movl(Address(from, to_from, Address::times_1, 0), rax);
974 987 __ addptr(from, 4);
975 988 __ subl(count, 1<<shift);
976 989 }
977 990 __ BIND(L_copy_64_bytes);
978 991 __ mov(rax, count);
979 992 __ shrl(rax, shift+1); // 8 bytes chunk count
980 993 //
981 994 // Copy 8-byte chunks through MMX registers, 8 per iteration of the loop
982 995 //
983 996 if (UseXMMForArrayCopy) {
984 997 xmm_copy_forward(from, to_from, rax);
985 998 } else {
986 999 mmx_copy_forward(from, to_from, rax);
987 1000 }
988 1001 }
 989 1002     // copy trailing dword
990 1003 __ BIND(L_copy_4_bytes);
991 1004 __ testl(count, 1<<shift);
992 1005 __ jccb(Assembler::zero, L_copy_2_bytes);
993 1006 __ movl(rax, Address(from, 0));
994 1007 __ movl(Address(from, to_from, Address::times_1, 0), rax);
995 1008 if (t == T_BYTE || t == T_SHORT) {
996 1009 __ addptr(from, 4);
997 1010 __ BIND(L_copy_2_bytes);
 998 1011       // copy trailing word
999 1012 __ testl(count, 1<<(shift-1));
1000 1013 __ jccb(Assembler::zero, L_copy_byte);
1001 1014 __ movw(rax, Address(from, 0));
1002 1015 __ movw(Address(from, to_from, Address::times_1, 0), rax);
1003 1016 if (t == T_BYTE) {
1004 1017 __ addptr(from, 2);
1005 1018 __ BIND(L_copy_byte);
1006 1019         // copy trailing byte
1007 1020 __ testl(count, 1);
1008 1021 __ jccb(Assembler::zero, L_exit);
1009 1022 __ movb(rax, Address(from, 0));
1010 1023 __ movb(Address(from, to_from, Address::times_1, 0), rax);
1011 1024 __ BIND(L_exit);
1012 1025 } else {
1013 1026 __ BIND(L_copy_byte);
1014 1027 }
1015 1028 } else {
1016 1029 __ BIND(L_copy_2_bytes);
1017 1030 }
1018 1031
1019 1032 if (t == T_OBJECT) {
1020 1033 __ movl(count, Address(rsp, 12+12)); // reread 'count'
1021 1034 __ mov(to, saved_to); // restore 'to'
1022 1035 gen_write_ref_array_post_barrier(to, count);
1023 1036 __ BIND(L_0_count);
1024 1037 }
1025 1038 inc_copy_counter_np(t);
1026 1039 __ pop(rdi);
1027 1040 __ pop(rsi);
1028 1041 __ leave(); // required for proper stackwalking of RuntimeStub frame
1029 1042 __ xorptr(rax, rax); // return 0
1030 1043 __ ret(0);
1031 1044 return start;
1032 1045 }
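  // A note on the addressing trick used by the copy stubs above: once
  // 'to' has been overwritten with to - from, the destination element is
  // reached as Address(from, to_from, times_1, disp) == to + disp, so a
  // single induction register advances both arrays. In C++ terms (a sketch):
  static void copy_forward_sketch(char* from, char* to, size_t byte_count) {
    ptrdiff_t to_from = to - from;     // the 'subptr(to, from)' above
    for (size_t i = 0; i < byte_count; i++) {
      *(from + to_from) = *from;       // stores into to[i]
      from++;                          // one register drives both addresses
    }
  }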
1033 1046
1034 1047
1035 1048 address generate_conjoint_copy(BasicType t, bool aligned,
1036 1049 Address::ScaleFactor sf,
1037 1050 address nooverlap_target,
1038 1051 address* entry, const char *name) {
1039 1052 __ align(CodeEntryAlignment);
1040 1053 StubCodeMark mark(this, "StubRoutines", name);
1041 1054 address start = __ pc();
1042 1055
1043 1056 Label L_0_count, L_exit, L_skip_align1, L_skip_align2, L_copy_byte;
1044 1057 Label L_copy_2_bytes, L_copy_4_bytes, L_copy_8_bytes, L_copy_8_bytes_loop;
1045 1058
1046 1059 int shift = Address::times_ptr - sf;
1047 1060
1048 1061 const Register src = rax; // source array address
1049 1062 const Register dst = rdx; // destination array address
1050 1063 const Register from = rsi; // source array address
1051 1064 const Register to = rdi; // destination array address
1052 1065 const Register count = rcx; // elements count
1053 1066 const Register end = rax; // array end address
1054 1067
1055 1068 __ enter(); // required for proper stackwalking of RuntimeStub frame
1056 1069 __ push(rsi);
1057 1070 __ push(rdi);
1058 1071 __ movptr(src , Address(rsp, 12+ 4)); // from
1059 1072 __ movptr(dst , Address(rsp, 12+ 8)); // to
1060 1073 __ movl2ptr(count, Address(rsp, 12+12)); // count
1061 1074 if (t == T_OBJECT) {
1062 1075 gen_write_ref_array_pre_barrier(dst, count);
1063 1076 }
1064 1077
1065 1078 if (entry != NULL) {
1066 1079 *entry = __ pc(); // Entry point from generic arraycopy stub.
1067 1080 BLOCK_COMMENT("Entry:");
1068 1081 }
1069 1082
1070 1083 if (t == T_OBJECT) {
1071 1084 __ testl(count, count);
1072 1085 __ jcc(Assembler::zero, L_0_count);
1073 1086 }
1074 1087 __ mov(from, src);
1075 1088 __ mov(to , dst);
1076 1089
1077 1090 // arrays overlap test
1078 1091 RuntimeAddress nooverlap(nooverlap_target);
1079 1092 __ cmpptr(dst, src);
1080 1093 __ lea(end, Address(src, count, sf, 0)); // src + count * elem_size
1081 1094 __ jump_cc(Assembler::belowEqual, nooverlap);
1082 1095 __ cmpptr(dst, end);
1083 1096 __ jump_cc(Assembler::aboveEqual, nooverlap);
1084 1097
1085 1098 // copy from high to low
1086 1099 __ cmpl(count, 2<<shift); // Short arrays (< 8 bytes) copy by element
1087 1100 __ jcc(Assembler::below, L_copy_4_bytes); // use unsigned cmp
1088 1101 if (t == T_BYTE || t == T_SHORT) {
1089 1102       // Align the end of the destination array at a 4-byte address boundary
1090 1103 __ lea(end, Address(dst, count, sf, 0));
1091 1104 if (t == T_BYTE) {
1092 1105         // One-byte misalignment happens only for byte arrays
1093 1106 __ testl(end, 1);
1094 1107 __ jccb(Assembler::zero, L_skip_align1);
1095 1108 __ decrement(count);
1096 1109 __ movb(rdx, Address(from, count, sf, 0));
1097 1110 __ movb(Address(to, count, sf, 0), rdx);
1098 1111 __ BIND(L_skip_align1);
1099 1112 }
1100 1113       // Two-byte misalignment happens only for byte and short (char) arrays
1101 1114 __ testl(end, 2);
1102 1115 __ jccb(Assembler::zero, L_skip_align2);
1103 1116 __ subptr(count, 1<<(shift-1));
1104 1117 __ movw(rdx, Address(from, count, sf, 0));
1105 1118 __ movw(Address(to, count, sf, 0), rdx);
1106 1119 __ BIND(L_skip_align2);
1107 1120 __ cmpl(count, 2<<shift); // Short arrays (< 8 bytes) copy by element
1108 1121 __ jcc(Assembler::below, L_copy_4_bytes);
1109 1122 }
1110 1123
1111 1124 if (!VM_Version::supports_mmx()) {
1112 1125 __ std();
1113 1126 __ mov(rax, count); // Save 'count'
1114 1127 __ mov(rdx, to); // Save 'to'
1115 1128 __ lea(rsi, Address(from, count, sf, -4));
1116 1129 __ lea(rdi, Address(to , count, sf, -4));
1117 1130 __ shrptr(count, shift); // bytes count
1118 1131 __ rep_mov();
1119 1132 __ cld();
1120 1133 __ mov(count, rax); // restore 'count'
1121 1134 __ andl(count, (1<<shift)-1); // mask the number of rest elements
1122 1135 __ movptr(from, Address(rsp, 12+4)); // reread 'from'
1123 1136 __ mov(to, rdx); // restore 'to'
1124 1137       __ jmpb(L_copy_2_bytes); // all dwords were copied
1125 1138 } else {
1126 1139       // Align the end of the array to 8 bytes. It is 4-byte aligned already.
1127 1140 __ testptr(end, 4);
1128 1141 __ jccb(Assembler::zero, L_copy_8_bytes);
1129 1142 __ subl(count, 1<<shift);
1130 1143 __ movl(rdx, Address(from, count, sf, 0));
1131 1144 __ movl(Address(to, count, sf, 0), rdx);
1132 1145 __ jmpb(L_copy_8_bytes);
1133 1146
1134 1147 __ align(16);
1135 1148 // Move 8 bytes
1136 1149 __ BIND(L_copy_8_bytes_loop);
1137 1150 if (UseXMMForArrayCopy) {
1138 1151 __ movq(xmm0, Address(from, count, sf, 0));
1139 1152 __ movq(Address(to, count, sf, 0), xmm0);
1140 1153 } else {
1141 1154 __ movq(mmx0, Address(from, count, sf, 0));
1142 1155 __ movq(Address(to, count, sf, 0), mmx0);
1143 1156 }
1144 1157 __ BIND(L_copy_8_bytes);
1145 1158 __ subl(count, 2<<shift);
1146 1159 __ jcc(Assembler::greaterEqual, L_copy_8_bytes_loop);
1147 1160 __ addl(count, 2<<shift);
1148 1161 if (!UseXMMForArrayCopy) {
1149 1162 __ emms();
1150 1163 }
1151 1164 }
1152 1165 __ BIND(L_copy_4_bytes);
1153 1166     // copy prefix dword
1154 1167 __ testl(count, 1<<shift);
1155 1168 __ jccb(Assembler::zero, L_copy_2_bytes);
1156 1169 __ movl(rdx, Address(from, count, sf, -4));
1157 1170 __ movl(Address(to, count, sf, -4), rdx);
1158 1171
1159 1172 if (t == T_BYTE || t == T_SHORT) {
1160 1173 __ subl(count, (1<<shift));
1161 1174 __ BIND(L_copy_2_bytes);
1162 1175       // copy prefix word
1163 1176 __ testl(count, 1<<(shift-1));
1164 1177 __ jccb(Assembler::zero, L_copy_byte);
1165 1178 __ movw(rdx, Address(from, count, sf, -2));
1166 1179 __ movw(Address(to, count, sf, -2), rdx);
1167 1180 if (t == T_BYTE) {
1168 1181 __ subl(count, 1<<(shift-1));
1169 1182 __ BIND(L_copy_byte);
1170 1183 // copy prefix byte
1171 1184 __ testl(count, 1);
1172 1185 __ jccb(Assembler::zero, L_exit);
1173 1186 __ movb(rdx, Address(from, 0));
1174 1187 __ movb(Address(to, 0), rdx);
1175 1188 __ BIND(L_exit);
1176 1189 } else {
1177 1190 __ BIND(L_copy_byte);
1178 1191 }
1179 1192 } else {
1180 1193 __ BIND(L_copy_2_bytes);
1181 1194 }
1182 1195 if (t == T_OBJECT) {
1183 1196 __ movl2ptr(count, Address(rsp, 12+12)); // reread count
1184 1197 gen_write_ref_array_post_barrier(to, count);
1185 1198 __ BIND(L_0_count);
1186 1199 }
1187 1200 inc_copy_counter_np(t);
1188 1201 __ pop(rdi);
1189 1202 __ pop(rsi);
1190 1203 __ leave(); // required for proper stackwalking of RuntimeStub frame
1191 1204 __ xorptr(rax, rax); // return 0
1192 1205 __ ret(0);
1193 1206 return start;
1194 1207 }
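  // The overlap test above falls through to the backward (high-to-low) copy
  // only when the destination starts inside the source range; otherwise it
  // tail-jumps to the disjoint stub. In C++ terms (a sketch):
  static bool needs_backward_copy_sketch(char* src, char* dst, size_t byte_count) {
    return dst > src && dst < src + byte_count;  // else nooverlap_target is safe
  }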
1195 1208
1196 1209
1197 1210 address generate_disjoint_long_copy(address* entry, const char *name) {
1198 1211 __ align(CodeEntryAlignment);
1199 1212 StubCodeMark mark(this, "StubRoutines", name);
1200 1213 address start = __ pc();
1201 1214
1202 1215 Label L_copy_8_bytes, L_copy_8_bytes_loop;
1203 1216 const Register from = rax; // source array address
1204 1217 const Register to = rdx; // destination array address
1205 1218 const Register count = rcx; // elements count
1206 1219 const Register to_from = rdx; // (to - from)
1207 1220
1208 1221 __ enter(); // required for proper stackwalking of RuntimeStub frame
1209 1222 __ movptr(from , Address(rsp, 8+0)); // from
1210 1223 __ movptr(to , Address(rsp, 8+4)); // to
1211 1224 __ movl2ptr(count, Address(rsp, 8+8)); // count
1212 1225
1213 1226 *entry = __ pc(); // Entry point from conjoint arraycopy stub.
1214 1227 BLOCK_COMMENT("Entry:");
1215 1228
1216 1229 __ subptr(to, from); // to --> to_from
1217 1230 if (VM_Version::supports_mmx()) {
1218 1231 if (UseXMMForArrayCopy) {
1219 1232 xmm_copy_forward(from, to_from, count);
1220 1233 } else {
1221 1234 mmx_copy_forward(from, to_from, count);
1222 1235 }
1223 1236 } else {
1224 1237 __ jmpb(L_copy_8_bytes);
1225 1238 __ align(16);
1226 1239 __ BIND(L_copy_8_bytes_loop);
1227 1240 __ fild_d(Address(from, 0));
1228 1241 __ fistp_d(Address(from, to_from, Address::times_1));
1229 1242 __ addptr(from, 8);
1230 1243 __ BIND(L_copy_8_bytes);
1231 1244 __ decrement(count);
1232 1245 __ jcc(Assembler::greaterEqual, L_copy_8_bytes_loop);
1233 1246 }
1234 1247 inc_copy_counter_np(T_LONG);
1235 1248 __ leave(); // required for proper stackwalking of RuntimeStub frame
1236 1249 __ xorptr(rax, rax); // return 0
1237 1250 __ ret(0);
1238 1251 return start;
1239 1252 }
1240 1253
1241 1254 address generate_conjoint_long_copy(address nooverlap_target,
1242 1255 address* entry, const char *name) {
1243 1256 __ align(CodeEntryAlignment);
1244 1257 StubCodeMark mark(this, "StubRoutines", name);
1245 1258 address start = __ pc();
1246 1259
1247 1260 Label L_copy_8_bytes, L_copy_8_bytes_loop;
1248 1261 const Register from = rax; // source array address
1249 1262 const Register to = rdx; // destination array address
1250 1263 const Register count = rcx; // elements count
1251 1264 const Register end_from = rax; // source array end address
1252 1265
1253 1266 __ enter(); // required for proper stackwalking of RuntimeStub frame
1254 1267 __ movptr(from , Address(rsp, 8+0)); // from
1255 1268 __ movptr(to , Address(rsp, 8+4)); // to
1256 1269 __ movl2ptr(count, Address(rsp, 8+8)); // count
1257 1270
1258 1271 *entry = __ pc(); // Entry point from generic arraycopy stub.
1259 1272 BLOCK_COMMENT("Entry:");
1260 1273
1261 1274 // arrays overlap test
1262 1275 __ cmpptr(to, from);
1263 1276 RuntimeAddress nooverlap(nooverlap_target);
1264 1277 __ jump_cc(Assembler::belowEqual, nooverlap);
1265 1278 __ lea(end_from, Address(from, count, Address::times_8, 0));
1266 1279 __ cmpptr(to, end_from);
1267 1280 __ movptr(from, Address(rsp, 8)); // from
1268 1281 __ jump_cc(Assembler::aboveEqual, nooverlap);
1269 1282
1270 1283 __ jmpb(L_copy_8_bytes);
1271 1284
1272 1285 __ align(16);
1273 1286 __ BIND(L_copy_8_bytes_loop);
1274 1287 if (VM_Version::supports_mmx()) {
1275 1288 if (UseXMMForArrayCopy) {
1276 1289 __ movq(xmm0, Address(from, count, Address::times_8));
1277 1290 __ movq(Address(to, count, Address::times_8), xmm0);
1278 1291 } else {
1279 1292 __ movq(mmx0, Address(from, count, Address::times_8));
1280 1293 __ movq(Address(to, count, Address::times_8), mmx0);
1281 1294 }
1282 1295 } else {
1283 1296 __ fild_d(Address(from, count, Address::times_8));
1284 1297 __ fistp_d(Address(to, count, Address::times_8));
1285 1298 }
1286 1299 __ BIND(L_copy_8_bytes);
1287 1300 __ decrement(count);
1288 1301 __ jcc(Assembler::greaterEqual, L_copy_8_bytes_loop);
1289 1302
1290 1303 if (VM_Version::supports_mmx() && !UseXMMForArrayCopy) {
1291 1304 __ emms();
1292 1305 }
1293 1306 inc_copy_counter_np(T_LONG);
1294 1307 __ leave(); // required for proper stackwalking of RuntimeStub frame
1295 1308 __ xorptr(rax, rax); // return 0
1296 1309 __ ret(0);
1297 1310 return start;
1298 1311 }
1299 1312
1300 1313
1301 1314 // Helper for generating a dynamic type check.
1302 1315 // The sub_klass must be one of {rbx, rdx, rsi}.
1303 1316 // The temp is killed.
1304 1317 void generate_type_check(Register sub_klass,
1305 1318 Address& super_check_offset_addr,
1306 1319 Address& super_klass_addr,
1307 1320 Register temp,
1308 1321 Label* L_success, Label* L_failure) {
1309 1322 BLOCK_COMMENT("type_check:");
1310 1323
1311 1324 Label L_fallthrough;
1312 1325 #define LOCAL_JCC(assembler_con, label_ptr) \
1313 1326 if (label_ptr != NULL) __ jcc(assembler_con, *(label_ptr)); \
1314 1327 else __ jcc(assembler_con, L_fallthrough) /*omit semi*/
1315 1328
1316 1329 // The following is a strange variation of the fast path which requires
1317 1330 // one less register, because needed values are on the argument stack.
1318 1331 // __ check_klass_subtype_fast_path(sub_klass, *super_klass*, temp,
1319 1332 // L_success, L_failure, NULL);
1320 1333 assert_different_registers(sub_klass, temp);
1321 1334
1322 1335 int sc_offset = (klassOopDesc::header_size() * HeapWordSize +
1323 1336 Klass::secondary_super_cache_offset_in_bytes());
1324 1337
1325 1338 // if the pointers are equal, we are done (e.g., String[] elements)
1326 1339 __ cmpptr(sub_klass, super_klass_addr);
1327 1340 LOCAL_JCC(Assembler::equal, L_success);
1328 1341
1329 1342 // check the supertype display:
1330 1343 __ movl2ptr(temp, super_check_offset_addr);
1331 1344 Address super_check_addr(sub_klass, temp, Address::times_1, 0);
1332 1345 __ movptr(temp, super_check_addr); // load displayed supertype
1333 1346 __ cmpptr(temp, super_klass_addr); // test the super type
1334 1347 LOCAL_JCC(Assembler::equal, L_success);
1335 1348
1336 1349 // if it was a primary super, we can just fail immediately
1337 1350 __ cmpl(super_check_offset_addr, sc_offset);
1338 1351 LOCAL_JCC(Assembler::notEqual, L_failure);
1339 1352
1340 1353 // The repne_scan instruction uses fixed registers, which will get spilled.
1341 1354 // We happen to know this works best when super_klass is in rax.
1342 1355 Register super_klass = temp;
1343 1356 __ movptr(super_klass, super_klass_addr);
1344 1357 __ check_klass_subtype_slow_path(sub_klass, super_klass, noreg, noreg,
1345 1358 L_success, L_failure);
1346 1359
1347 1360 __ bind(L_fallthrough);
1348 1361
1349 1362 if (L_success == NULL) { BLOCK_COMMENT("L_success:"); }
1350 1363 if (L_failure == NULL) { BLOCK_COMMENT("L_failure:"); }
1351 1364
1352 1365 #undef LOCAL_JCC
1353 1366 }
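  // What the fast path above decides, in C-like terms (a sketch; the last
  // step stands in for check_klass_subtype_slow_path's repne_scan over the
  // secondary supers, and scan_secondary_supers_sketch is an assumed helper):
  static bool scan_secondary_supers_sketch(address sub, address super);
  static bool type_check_sketch(address sub_klass, address super_klass,
                                int super_check_offset, int sc_offset) {
    if (sub_klass == super_klass) return true;                    // exact match
    address displayed = *(address*)(sub_klass + super_check_offset);
    if (displayed == super_klass) return true;                    // display/cache hit
    if (super_check_offset != sc_offset) return false;            // primary miss is final
    return scan_secondary_supers_sketch(sub_klass, super_klass);  // slow path
  }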
1354 1367
1355 1368 //
1356 1369 // Generate checkcasting array copy stub
1357 1370 //
1358 1371 // Input:
1359 1372 // 4(rsp) - source array address
1360 1373 // 8(rsp) - destination array address
1361 1374 // 12(rsp) - element count, can be zero
1362 1375 // 16(rsp) - size_t ckoff (super_check_offset)
1363 1376 // 20(rsp) - oop ckval (super_klass)
1364 1377 //
1365 1378 // Output:
1366 1379 // rax, == 0 - success
1367 1380 // rax, == -1^K - failure, where K is partial transfer count
1368 1381 //
1369 1382 address generate_checkcast_copy(const char *name, address* entry) {
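  // A caller decodes the failure encoding with a bitwise NOT, since
  // -1^K == ~K (a sketch, with hypothetical names):
  static int elements_copied_sketch(intptr_t rax_result, int length) {
    return (rax_result == 0) ? length : (int) ~rax_result;  // recovers K
  }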
1370 1383 __ align(CodeEntryAlignment);
1371 1384 StubCodeMark mark(this, "StubRoutines", name);
1372 1385 address start = __ pc();
1373 1386
1374 1387 Label L_load_element, L_store_element, L_do_card_marks, L_done;
1375 1388
1376 1389 // register use:
1377 1390 // rax, rdx, rcx -- loop control (end_from, end_to, count)
1378 1391 // rdi, rsi -- element access (oop, klass)
1379 1392 // rbx, -- temp
1380 1393 const Register from = rax; // source array address
1381 1394 const Register to = rdx; // destination array address
1382 1395 const Register length = rcx; // elements count
1383 1396 const Register elem = rdi; // each oop copied
1384 1397 const Register elem_klass = rsi; // each elem._klass (sub_klass)
1385 1398 const Register temp = rbx; // lone remaining temp
1386 1399
1387 1400 __ enter(); // required for proper stackwalking of RuntimeStub frame
1388 1401
1389 1402 __ push(rsi);
1390 1403 __ push(rdi);
1391 1404 __ push(rbx);
1392 1405
1393 1406 Address from_arg(rsp, 16+ 4); // from
1394 1407 Address to_arg(rsp, 16+ 8); // to
1395 1408 Address length_arg(rsp, 16+12); // elements count
1396 1409 Address ckoff_arg(rsp, 16+16); // super_check_offset
1397 1410 Address ckval_arg(rsp, 16+20); // super_klass
1398 1411
1399 1412 // Load up:
1400 1413 __ movptr(from, from_arg);
1401 1414 __ movptr(to, to_arg);
1402 1415 __ movl2ptr(length, length_arg);
1403 1416
1404 1417 *entry = __ pc(); // Entry point from generic arraycopy stub.
1405 1418 BLOCK_COMMENT("Entry:");
1406 1419
1407 1420 //---------------------------------------------------------------
1408 1421 // Assembler stub will be used for this call to arraycopy
1409 1422 // if the two arrays are subtypes of Object[] but the
1410 1423 // destination array type is not equal to or a supertype
1411 1424 // of the source type. Each element must be separately
1412 1425 // checked.
1413 1426
1414 1427 // Loop-invariant addresses. They are exclusive end pointers.
1415 1428 Address end_from_addr(from, length, Address::times_ptr, 0);
1416 1429 Address end_to_addr(to, length, Address::times_ptr, 0);
1417 1430
1418 1431 Register end_from = from; // re-use
1419 1432 Register end_to = to; // re-use
1420 1433 Register count = length; // re-use
1421 1434
1422 1435 // Loop-variant addresses. They assume post-incremented count < 0.
1423 1436 Address from_element_addr(end_from, count, Address::times_ptr, 0);
1424 1437 Address to_element_addr(end_to, count, Address::times_ptr, 0);
1425 1438 Address elem_klass_addr(elem, oopDesc::klass_offset_in_bytes());
1426 1439
1427 1440 // Copy from low to high addresses, indexed from the end of each array.
1428 1441 gen_write_ref_array_pre_barrier(to, count);
1429 1442 __ lea(end_from, end_from_addr);
1430 1443 __ lea(end_to, end_to_addr);
1431 1444 assert(length == count, ""); // else fix next line:
1432 1445 __ negptr(count); // negate and test the length
1433 1446 __ jccb(Assembler::notZero, L_load_element);
1434 1447
1435 1448 // Empty array: Nothing to do.
1436 1449 __ xorptr(rax, rax); // return 0 on (trivial) success
1437 1450 __ jmp(L_done);
1438 1451
1439 1452 // ======== begin loop ========
1440 1453 // (Loop is rotated; its entry is L_load_element.)
1441 1454 // Loop control:
1442 1455 // for (count = -count; count != 0; count++)
1443 1456     // Base pointers src, dst are biased by wordSize*count, to the last element.
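              // Reader's note: end_from/end_to (computed above) point one past the
              // last element, so with count == -length the first access
              // end_from[count] touches from[0], and count incrementing up to zero
              // doubles as the loop-exit test.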
1444 1457 __ align(16);
1445 1458
1446 1459 __ BIND(L_store_element);
1447 1460 __ movptr(to_element_addr, elem); // store the oop
1448 1461 __ increment(count); // increment the count toward zero
1449 1462 __ jccb(Assembler::zero, L_do_card_marks);
1450 1463
1451 1464 // ======== loop entry is here ========
1452 1465 __ BIND(L_load_element);
1453 1466 __ movptr(elem, from_element_addr); // load the oop
1454 1467 __ testptr(elem, elem);
1455 1468 __ jccb(Assembler::zero, L_store_element);
1456 1469
1457 1470 // (Could do a trick here: Remember last successful non-null
1458 1471 // element stored and make a quick oop equality check on it.)
1459 1472
1460 1473 __ movptr(elem_klass, elem_klass_addr); // query the object klass
1461 1474 generate_type_check(elem_klass, ckoff_arg, ckval_arg, temp,
1462 1475 &L_store_element, NULL);
1463 1476 // (On fall-through, we have failed the element type check.)
1464 1477 // ======== end loop ========
1465 1478
1466 1479 // It was a real error; we must depend on the caller to finish the job.
1467 1480 // Register "count" = -1 * number of *remaining* oops, length_arg = *total* oops.
1468 1481 // Emit GC store barriers for the oops we have copied (length_arg + count),
1469 1482 // and report their number to the caller.
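              // Worked example (reader's note): with length_arg == 10 and a failure
              // while 7 elements remain, count == -7 here; the addl below yields
              // 3 (elements actually copied) and notptr turns that into
              // rax == ~3 == -4, from which the caller recovers K = ~rax = 3.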
1470 1483 __ addl(count, length_arg); // transfers = (length - remaining)
1471 1484 __ movl2ptr(rax, count); // save the value
1472 1485 __ notptr(rax); // report (-1^K) to caller
1473 1486 __ movptr(to, to_arg); // reload
1474 1487 assert_different_registers(to, count, rax);
1475 1488 gen_write_ref_array_post_barrier(to, count);
1476 1489 __ jmpb(L_done);
1477 1490
1478 1491 // Come here on success only.
1479 1492 __ BIND(L_do_card_marks);
1480 1493 __ movl2ptr(count, length_arg);
1481 1494 __ movptr(to, to_arg); // reload
1482 1495 gen_write_ref_array_post_barrier(to, count);
1483 1496 __ xorptr(rax, rax); // return 0 on success
1484 1497
1485 1498 // Common exit point (success or failure).
1486 1499 __ BIND(L_done);
1487 1500 __ pop(rbx);
1488 1501 __ pop(rdi);
1489 1502 __ pop(rsi);
1490 1503 inc_counter_np(SharedRuntime::_checkcast_array_copy_ctr);
1491 1504 __ leave(); // required for proper stackwalking of RuntimeStub frame
1492 1505 __ ret(0);
1493 1506
1494 1507 return start;
1495 1508 }
1496 1509
1497 1510 //
1498 1511 // Generate 'unsafe' array copy stub
1499 1512 // Though just as safe as the other stubs, it takes an unscaled
1500 1513 // size_t argument instead of an element count.
1501 1514 //
1502 1515 // Input:
1503 1516 // 4(rsp) - source array address
1504 1517 // 8(rsp) - destination array address
1505 1518 // 12(rsp) - byte count, can be zero
1506 1519 //
1507 1520 // Output:
1508 1521 // rax, == 0 - success
1509 1522 // rax, == -1 - need to call System.arraycopy
1510 1523 //
1511 1524 // Examines the alignment of the operands and dispatches
1512 1525 // to a long, int, short, or byte copy loop.
1513 1526 //
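              // A reader's sketch of the dispatch below (hedged, not original code):
              // OR-ing both addresses and the byte count pools every low-order bit
              // that could misalign a wider access, so one test per width suffices:
              //
              //   int bits = (int)from | (int)to | (int)count;
              //   if      ((bits & (BytesPerLong -1)) == 0) goto long_copy;
              //   else if ((bits & (BytesPerInt  -1)) == 0) goto int_copy;
              //   else if ((bits & (BytesPerShort-1)) == 0) goto short_copy;
              //   else                                      goto byte_copy;
              //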
1514 1527 address generate_unsafe_copy(const char *name,
1515 1528 address byte_copy_entry,
1516 1529 address short_copy_entry,
1517 1530 address int_copy_entry,
1518 1531 address long_copy_entry) {
1519 1532
1520 1533 Label L_long_aligned, L_int_aligned, L_short_aligned;
1521 1534
1522 1535 __ align(CodeEntryAlignment);
1523 1536 StubCodeMark mark(this, "StubRoutines", name);
1524 1537 address start = __ pc();
1525 1538
1526 1539 const Register from = rax; // source array address
1527 1540 const Register to = rdx; // destination array address
1528 1541 const Register count = rcx; // elements count
1529 1542
1530 1543 __ enter(); // required for proper stackwalking of RuntimeStub frame
1531 1544 __ push(rsi);
1532 1545 __ push(rdi);
1533 1546 Address from_arg(rsp, 12+ 4); // from
1534 1547 Address to_arg(rsp, 12+ 8); // to
1535 1548 Address count_arg(rsp, 12+12); // byte count
1536 1549
1537 1550 // Load up:
1538 1551 __ movptr(from , from_arg);
1539 1552 __ movptr(to , to_arg);
1540 1553 __ movl2ptr(count, count_arg);
1541 1554
1542 1555 // bump this on entry, not on exit:
1543 1556 inc_counter_np(SharedRuntime::_unsafe_array_copy_ctr);
1544 1557
1545 1558 const Register bits = rsi;
1546 1559 __ mov(bits, from);
1547 1560 __ orptr(bits, to);
1548 1561 __ orptr(bits, count);
1549 1562
1550 1563 __ testl(bits, BytesPerLong-1);
1551 1564 __ jccb(Assembler::zero, L_long_aligned);
1552 1565
1553 1566 __ testl(bits, BytesPerInt-1);
1554 1567 __ jccb(Assembler::zero, L_int_aligned);
1555 1568
1556 1569 __ testl(bits, BytesPerShort-1);
1557 1570 __ jump_cc(Assembler::notZero, RuntimeAddress(byte_copy_entry));
1558 1571
1559 1572 __ BIND(L_short_aligned);
1560 1573 __ shrptr(count, LogBytesPerShort); // size => short_count
1561 1574 __ movl(count_arg, count); // update 'count'
1562 1575 __ jump(RuntimeAddress(short_copy_entry));
1563 1576
1564 1577 __ BIND(L_int_aligned);
1565 1578 __ shrptr(count, LogBytesPerInt); // size => int_count
1566 1579 __ movl(count_arg, count); // update 'count'
1567 1580 __ jump(RuntimeAddress(int_copy_entry));
1568 1581
1569 1582 __ BIND(L_long_aligned);
1570 1583 __ shrptr(count, LogBytesPerLong); // size => qword_count
1571 1584 __ movl(count_arg, count); // update 'count'
1572 1585 __ pop(rdi); // Do pops here since jlong_arraycopy stub does not do it.
1573 1586 __ pop(rsi);
1574 1587 __ jump(RuntimeAddress(long_copy_entry));
1575 1588
1576 1589 return start;
1577 1590 }
1578 1591
1579 1592
1580 1593 // Perform range checks on the proposed arraycopy.
1581 1594 // Smashes src_pos and dst_pos. (Uses them up for temps.)
1582 1595 void arraycopy_range_checks(Register src,
1583 1596 Register src_pos,
1584 1597 Register dst,
1585 1598 Register dst_pos,
1586 1599 Address& length,
1587 1600 Label& L_failed) {
1588 1601 BLOCK_COMMENT("arraycopy_range_checks:");
1589 1602 const Register src_end = src_pos; // source array end position
1590 1603 const Register dst_end = dst_pos; // destination array end position
1591 1604 __ addl(src_end, length); // src_pos + length
1592 1605 __ addl(dst_end, length); // dst_pos + length
1593 1606
1594 1607 // if (src_pos + length > arrayOop(src)->length() ) FAIL;
1595 1608 __ cmpl(src_end, Address(src, arrayOopDesc::length_offset_in_bytes()));
1596 1609 __ jcc(Assembler::above, L_failed);
1597 1610
1598 1611 // if (dst_pos + length > arrayOop(dst)->length() ) FAIL;
1599 1612 __ cmpl(dst_end, Address(dst, arrayOopDesc::length_offset_in_bytes()));
1600 1613 __ jcc(Assembler::above, L_failed);
1601 1614
1602 1615 BLOCK_COMMENT("arraycopy_range_checks done");
1603 1616 }
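            // Reader's note: the unsigned 'above' compares are safe because both
            // callers in this file have already verified src_pos, dst_pos and
            // length to be non-negative, so neither 32-bit sum can wrap when
            // viewed as unsigned.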
1604 1617
1605 1618
1606 1619 //
1607 1620 // Generate generic array copy stubs
1608 1621 //
1609 1622 // Input:
1610 1623 // 4(rsp) - src oop
1611 1624 // 8(rsp) - src_pos
1612 1625 // 12(rsp) - dst oop
1613 1626 // 16(rsp) - dst_pos
1614 1627 // 20(rsp) - element count
1615 1628 //
1616 1629 // Output:
1617 1630 // rax, == 0 - success
1618 1631 // rax, == -1^K - failure, where K is partial transfer count
1619 1632 //
1620 1633 address generate_generic_copy(const char *name,
1621 1634 address entry_jbyte_arraycopy,
1622 1635 address entry_jshort_arraycopy,
1623 1636 address entry_jint_arraycopy,
1624 1637 address entry_oop_arraycopy,
1625 1638 address entry_jlong_arraycopy,
1626 1639 address entry_checkcast_arraycopy) {
1627 1640 Label L_failed, L_failed_0, L_objArray;
1628 1641
1629 1642 { int modulus = CodeEntryAlignment;
1630 1643 int target = modulus - 5; // 5 = sizeof jmp(L_failed)
1631 1644 int advance = target - (__ offset() % modulus);
1632 1645 if (advance < 0) advance += modulus;
1633 1646 if (advance > 0) __ nop(advance);
1634 1647 }
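            // Reader's note on the block above (illustrative numbers): with
            // CodeEntryAlignment == 32 and __ offset() % 32 == 10, target = 27 and
            // advance = 17 nops, so the 5-byte jmp(L_failed) occupies bytes 27..31
            // and the stub entry below lands exactly on the alignment boundary.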
1635 1648 StubCodeMark mark(this, "StubRoutines", name);
1636 1649
1637 1650 // Short-hop target to L_failed. Makes for denser prologue code.
1638 1651 __ BIND(L_failed_0);
1639 1652 __ jmp(L_failed);
1640 1653 assert(__ offset() % CodeEntryAlignment == 0, "no further alignment needed");
1641 1654
1642 1655 __ align(CodeEntryAlignment);
1643 1656 address start = __ pc();
1644 1657
1645 1658 __ enter(); // required for proper stackwalking of RuntimeStub frame
1646 1659 __ push(rsi);
1647 1660 __ push(rdi);
1648 1661
1649 1662 // bump this on entry, not on exit:
1650 1663 inc_counter_np(SharedRuntime::_generic_array_copy_ctr);
1651 1664
1652 1665 // Input values
1653 1666 Address SRC (rsp, 12+ 4);
1654 1667 Address SRC_POS (rsp, 12+ 8);
1655 1668 Address DST (rsp, 12+12);
1656 1669 Address DST_POS (rsp, 12+16);
1657 1670 Address LENGTH (rsp, 12+20);
1658 1671
1659 1672 //-----------------------------------------------------------------------
1660 1673 // Assembler stub will be used for this call to arraycopy
1661 1674 // if the following conditions are met:
1662 1675 //
1663 1676 // (1) src and dst must not be null.
1664 1677 // (2) src_pos must not be negative.
1665 1678 // (3) dst_pos must not be negative.
1666 1679 // (4) length must not be negative.
1667 1680 // (5) src klass and dst klass should be the same and not NULL.
1668 1681 // (6) src and dst should be arrays.
1669 1682 // (7) src_pos + length must not exceed length of src.
1670 1683 // (8) dst_pos + length must not exceed length of dst.
1671 1684 //
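            // Reader's sketch of checks (1)-(4) as they are laid out below (hedged):
            //
            //   if (src == NULL || dst == NULL) return -1;   // (1)
            //   if (src_pos < 0 || dst_pos < 0) return -1;   // (2), (3)
            //   if (length < 0)                 return -1;   // (4)
            //   // (5)-(8) follow once the klasses and layout helpers are loaded
            //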
1672 1685
1673 1686 const Register src = rax; // source array oop
1674 1687 const Register src_pos = rsi;
1675 1688 const Register dst = rdx; // destination array oop
1676 1689 const Register dst_pos = rdi;
1677 1690 const Register length = rcx; // transfer count
1678 1691
1679 1692 // if (src == NULL) return -1;
1680 1693 __ movptr(src, SRC); // src oop
1681 1694 __ testptr(src, src);
1682 1695 __ jccb(Assembler::zero, L_failed_0);
1683 1696
1684 1697 // if (src_pos < 0) return -1;
1685 1698 __ movl2ptr(src_pos, SRC_POS); // src_pos
1686 1699 __ testl(src_pos, src_pos);
1687 1700 __ jccb(Assembler::negative, L_failed_0);
1688 1701
1689 1702 // if (dst == NULL) return -1;
1690 1703 __ movptr(dst, DST); // dst oop
1691 1704 __ testptr(dst, dst);
1692 1705 __ jccb(Assembler::zero, L_failed_0);
1693 1706
1694 1707 // if (dst_pos < 0) return -1;
1695 1708 __ movl2ptr(dst_pos, DST_POS); // dst_pos
1696 1709 __ testl(dst_pos, dst_pos);
1697 1710 __ jccb(Assembler::negative, L_failed_0);
1698 1711
1699 1712 // if (length < 0) return -1;
1700 1713 __ movl2ptr(length, LENGTH); // length
1701 1714 __ testl(length, length);
1702 1715 __ jccb(Assembler::negative, L_failed_0);
1703 1716
1704 1717 // if (src->klass() == NULL) return -1;
1705 1718 Address src_klass_addr(src, oopDesc::klass_offset_in_bytes());
1706 1719 Address dst_klass_addr(dst, oopDesc::klass_offset_in_bytes());
1707 1720 const Register rcx_src_klass = rcx; // array klass
1708 1721 __ movptr(rcx_src_klass, Address(src, oopDesc::klass_offset_in_bytes()));
1709 1722
1710 1723 #ifdef ASSERT
1711 1724 // assert(src->klass() != NULL);
1712 1725 BLOCK_COMMENT("assert klasses not null");
1713 1726 { Label L1, L2;
1714 1727 __ testptr(rcx_src_klass, rcx_src_klass);
1715 1728 __ jccb(Assembler::notZero, L2); // it is broken if klass is NULL
1716 1729 __ bind(L1);
1717 1730 __ stop("broken null klass");
1718 1731 __ bind(L2);
1719 1732 __ cmpptr(dst_klass_addr, (int32_t)NULL_WORD);
1720 1733 __ jccb(Assembler::equal, L1); // this would be broken also
1721 1734 BLOCK_COMMENT("assert done");
1722 1735 }
1723 1736 #endif //ASSERT
1724 1737
1725 1738 // Load layout helper (32-bits)
1726 1739 //
1727 1740 // |array_tag| | header_size | element_type | |log2_element_size|
1728 1741 // 32 30 24 16 8 2 0
1729 1742 //
1730 1743 // array_tag: typeArray = 0x3, objArray = 0x2, non-array = 0x0
1731 1744 //
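            // Reader's sketch of decoding those fields, using the Klass::_lh_*
            // constants referenced in the code below (an illustration only):
            //
            //   int tag      = lh >> Klass::_lh_array_tag_shift;        // 0x3 or 0x2
            //   int hdr_size = (lh >> Klass::_lh_header_size_shift)
            //                  & Klass::_lh_header_size_mask;           // bytes to element 0
            //   int log2_es  = lh & Klass::_lh_log2_element_size_mask;  // index scale
            //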
1732 1745
1733 1746 int lh_offset = klassOopDesc::header_size() * HeapWordSize +
1734 1747 Klass::layout_helper_offset_in_bytes();
1735 1748 Address src_klass_lh_addr(rcx_src_klass, lh_offset);
1736 1749
1737 1750 // Handle objArrays completely differently...
1738 1751 jint objArray_lh = Klass::array_layout_helper(T_OBJECT);
1739 1752 __ cmpl(src_klass_lh_addr, objArray_lh);
1740 1753 __ jcc(Assembler::equal, L_objArray);
1741 1754
1742 1755 // if (src->klass() != dst->klass()) return -1;
1743 1756 __ cmpptr(rcx_src_klass, dst_klass_addr);
1744 1757 __ jccb(Assembler::notEqual, L_failed_0);
1745 1758
1746 1759 const Register rcx_lh = rcx; // layout helper
1747 1760 assert(rcx_lh == rcx_src_klass, "known alias");
1748 1761 __ movl(rcx_lh, src_klass_lh_addr);
1749 1762
1750 1763 // if (!src->is_Array()) return -1;
1751 1764 __ cmpl(rcx_lh, Klass::_lh_neutral_value);
1752 1765 __ jcc(Assembler::greaterEqual, L_failed_0); // signed cmp
1753 1766
1754 1767 // At this point, it is known to be a typeArray (array_tag 0x3).
1755 1768 #ifdef ASSERT
1756 1769 { Label L;
1757 1770 __ cmpl(rcx_lh, (Klass::_lh_array_tag_type_value << Klass::_lh_array_tag_shift));
1758 1771 __ jcc(Assembler::greaterEqual, L); // signed cmp
1759 1772 __ stop("must be a primitive array");
1760 1773 __ bind(L);
1761 1774 }
1762 1775 #endif
1763 1776
1764 1777 assert_different_registers(src, src_pos, dst, dst_pos, rcx_lh);
1765 1778 arraycopy_range_checks(src, src_pos, dst, dst_pos, LENGTH, L_failed);
1766 1779
1767 1780 // typeArrayKlass
1768 1781 //
1769 1782 // src_addr = (src + array_header_in_bytes()) + (src_pos << log2elemsize);
1770 1783 // dst_addr = (dst + array_header_in_bytes()) + (dst_pos << log2elemsize);
1771 1784 //
1772 1785 const Register rsi_offset = rsi; // array offset
1773 1786 const Register src_array = src; // src array offset
1774 1787 const Register dst_array = dst; // dst array offset
1775 1788 const Register rdi_elsize = rdi; // log2 element size
1776 1789
1777 1790 __ mov(rsi_offset, rcx_lh);
1778 1791 __ shrptr(rsi_offset, Klass::_lh_header_size_shift);
1779 1792 __ andptr(rsi_offset, Klass::_lh_header_size_mask); // array_offset
1780 1793 __ addptr(src_array, rsi_offset); // src array offset
1781 1794 __ addptr(dst_array, rsi_offset); // dst array offset
1782 1795 __ andptr(rcx_lh, Klass::_lh_log2_element_size_mask); // log2 elsize
1783 1796
1784 1797     // the following registers must be set before the jump to the corresponding stub
1785 1798 const Register from = src; // source array address
1786 1799 const Register to = dst; // destination array address
1787 1800 const Register count = rcx; // elements count
1788 1801     // some of them are also duplicated on the stack
1789 1802 #define FROM Address(rsp, 12+ 4)
1790 1803 #define TO Address(rsp, 12+ 8) // Not used now
1791 1804 #define COUNT Address(rsp, 12+12) // Only for oop arraycopy
1792 1805
1793 1806 BLOCK_COMMENT("scale indexes to element size");
1794 1807 __ movl2ptr(rsi, SRC_POS); // src_pos
1795 1808 __ shlptr(rsi); // src_pos << rcx (log2 elsize)
1796 1809 assert(src_array == from, "");
1797 1810 __ addptr(from, rsi); // from = src_array + SRC_POS << log2 elsize
1798 1811 __ movl2ptr(rdi, DST_POS); // dst_pos
1799 1812 __ shlptr(rdi); // dst_pos << rcx (log2 elsize)
1800 1813 assert(dst_array == to, "");
1801 1814 __ addptr(to, rdi); // to = dst_array + DST_POS << log2 elsize
1802 1815 __ movptr(FROM, from); // src_addr
1803 1816 __ mov(rdi_elsize, rcx_lh); // log2 elsize
1804 1817 __ movl2ptr(count, LENGTH); // elements count
1805 1818
1806 1819 BLOCK_COMMENT("choose copy loop based on element size");
1807 1820 __ cmpl(rdi_elsize, 0);
1808 1821
1809 1822 __ jump_cc(Assembler::equal, RuntimeAddress(entry_jbyte_arraycopy));
1810 1823 __ cmpl(rdi_elsize, LogBytesPerShort);
1811 1824 __ jump_cc(Assembler::equal, RuntimeAddress(entry_jshort_arraycopy));
1812 1825 __ cmpl(rdi_elsize, LogBytesPerInt);
1813 1826 __ jump_cc(Assembler::equal, RuntimeAddress(entry_jint_arraycopy));
1814 1827 #ifdef ASSERT
1815 1828 __ cmpl(rdi_elsize, LogBytesPerLong);
1816 1829 __ jccb(Assembler::notEqual, L_failed);
1817 1830 #endif
1818 1831 __ pop(rdi); // Do pops here since jlong_arraycopy stub does not do it.
1819 1832 __ pop(rsi);
1820 1833 __ jump(RuntimeAddress(entry_jlong_arraycopy));
1821 1834
1822 1835 __ BIND(L_failed);
1823 1836 __ xorptr(rax, rax);
1824 1837 __ notptr(rax); // return -1
1825 1838 __ pop(rdi);
1826 1839 __ pop(rsi);
1827 1840 __ leave(); // required for proper stackwalking of RuntimeStub frame
1828 1841 __ ret(0);
1829 1842
1830 1843 // objArrayKlass
1831 1844 __ BIND(L_objArray);
1832 1845 // live at this point: rcx_src_klass, src[_pos], dst[_pos]
1833 1846
1834 1847 Label L_plain_copy, L_checkcast_copy;
1835 1848 // test array classes for subtyping
1836 1849 __ cmpptr(rcx_src_klass, dst_klass_addr); // usual case is exact equality
1837 1850 __ jccb(Assembler::notEqual, L_checkcast_copy);
1838 1851
1839 1852 // Identically typed arrays can be copied without element-wise checks.
1840 1853 assert_different_registers(src, src_pos, dst, dst_pos, rcx_src_klass);
1841 1854 arraycopy_range_checks(src, src_pos, dst, dst_pos, LENGTH, L_failed);
1842 1855
1843 1856 __ BIND(L_plain_copy);
1844 1857 __ movl2ptr(count, LENGTH); // elements count
1845 1858 __ movl2ptr(src_pos, SRC_POS); // reload src_pos
1846 1859 __ lea(from, Address(src, src_pos, Address::times_ptr,
1847 1860 arrayOopDesc::base_offset_in_bytes(T_OBJECT))); // src_addr
1848 1861 __ movl2ptr(dst_pos, DST_POS); // reload dst_pos
1849 1862 __ lea(to, Address(dst, dst_pos, Address::times_ptr,
1850 1863 arrayOopDesc::base_offset_in_bytes(T_OBJECT))); // dst_addr
1851 1864 __ movptr(FROM, from); // src_addr
1852 1865 __ movptr(TO, to); // dst_addr
1853 1866 __ movl(COUNT, count); // count
1854 1867 __ jump(RuntimeAddress(entry_oop_arraycopy));
1855 1868
1856 1869 __ BIND(L_checkcast_copy);
1857 1870 // live at this point: rcx_src_klass, dst[_pos], src[_pos]
1858 1871 {
1859 1872 // Handy offsets:
1860 1873 int ek_offset = (klassOopDesc::header_size() * HeapWordSize +
1861 1874 objArrayKlass::element_klass_offset_in_bytes());
1862 1875 int sco_offset = (klassOopDesc::header_size() * HeapWordSize +
1863 1876 Klass::super_check_offset_offset_in_bytes());
1864 1877
1865 1878 Register rsi_dst_klass = rsi;
1866 1879 Register rdi_temp = rdi;
1867 1880 assert(rsi_dst_klass == src_pos, "expected alias w/ src_pos");
1868 1881 assert(rdi_temp == dst_pos, "expected alias w/ dst_pos");
1869 1882 Address dst_klass_lh_addr(rsi_dst_klass, lh_offset);
1870 1883
1871 1884 // Before looking at dst.length, make sure dst is also an objArray.
1872 1885 __ movptr(rsi_dst_klass, dst_klass_addr);
1873 1886 __ cmpl(dst_klass_lh_addr, objArray_lh);
1874 1887 __ jccb(Assembler::notEqual, L_failed);
1875 1888
1876 1889 // It is safe to examine both src.length and dst.length.
1877 1890 __ movl2ptr(src_pos, SRC_POS); // reload rsi
1878 1891 arraycopy_range_checks(src, src_pos, dst, dst_pos, LENGTH, L_failed);
1879 1892 // (Now src_pos and dst_pos are killed, but not src and dst.)
1880 1893
1881 1894 // We'll need this temp (don't forget to pop it after the type check).
1882 1895 __ push(rbx);
1883 1896 Register rbx_src_klass = rbx;
1884 1897
1885 1898 __ mov(rbx_src_klass, rcx_src_klass); // spill away from rcx
1886 1899 __ movptr(rsi_dst_klass, dst_klass_addr);
1887 1900 Address super_check_offset_addr(rsi_dst_klass, sco_offset);
1888 1901 Label L_fail_array_check;
1889 1902 generate_type_check(rbx_src_klass,
1890 1903 super_check_offset_addr, dst_klass_addr,
1891 1904 rdi_temp, NULL, &L_fail_array_check);
1892 1905 // (On fall-through, we have passed the array type check.)
1893 1906 __ pop(rbx);
1894 1907 __ jmp(L_plain_copy);
1895 1908
1896 1909 __ BIND(L_fail_array_check);
1897 1910 // Reshuffle arguments so we can call checkcast_arraycopy:
1898 1911
1899 1912 // match initial saves for checkcast_arraycopy
1900 1913 // push(rsi); // already done; see above
1901 1914 // push(rdi); // already done; see above
1902 1915 // push(rbx); // already done; see above
1903 1916
1904 1917 // Marshal outgoing arguments now, freeing registers.
1905 1918 Address from_arg(rsp, 16+ 4); // from
1906 1919 Address to_arg(rsp, 16+ 8); // to
1907 1920 Address length_arg(rsp, 16+12); // elements count
1908 1921 Address ckoff_arg(rsp, 16+16); // super_check_offset
1909 1922 Address ckval_arg(rsp, 16+20); // super_klass
1910 1923
1911 1924 Address SRC_POS_arg(rsp, 16+ 8);
1912 1925 Address DST_POS_arg(rsp, 16+16);
1913 1926 Address LENGTH_arg(rsp, 16+20);
1914 1927 // push rbx, changed the incoming offsets (why not just use rbp,??)
1915 1928 // assert(SRC_POS_arg.disp() == SRC_POS.disp() + 4, "");
1916 1929
1917 1930 __ movptr(rbx, Address(rsi_dst_klass, ek_offset));
1918 1931 __ movl2ptr(length, LENGTH_arg); // reload elements count
1919 1932 __ movl2ptr(src_pos, SRC_POS_arg); // reload src_pos
1920 1933 __ movl2ptr(dst_pos, DST_POS_arg); // reload dst_pos
1921 1934
1922 1935 __ movptr(ckval_arg, rbx); // destination element type
1923 1936 __ movl(rbx, Address(rbx, sco_offset));
1924 1937 __ movl(ckoff_arg, rbx); // corresponding class check offset
1925 1938
1926 1939 __ movl(length_arg, length); // outgoing length argument
1927 1940
1928 1941 __ lea(from, Address(src, src_pos, Address::times_ptr,
1929 1942 arrayOopDesc::base_offset_in_bytes(T_OBJECT)));
1930 1943 __ movptr(from_arg, from);
1931 1944
1932 1945 __ lea(to, Address(dst, dst_pos, Address::times_ptr,
1933 1946 arrayOopDesc::base_offset_in_bytes(T_OBJECT)));
1934 1947 __ movptr(to_arg, to);
1935 1948 __ jump(RuntimeAddress(entry_checkcast_arraycopy));
1936 1949 }
1937 1950
1938 1951 return start;
1939 1952 }
1940 1953
1941 1954 void generate_arraycopy_stubs() {
1942 1955 address entry;
1943 1956 address entry_jbyte_arraycopy;
1944 1957 address entry_jshort_arraycopy;
1945 1958 address entry_jint_arraycopy;
1946 1959 address entry_oop_arraycopy;
1947 1960 address entry_jlong_arraycopy;
1948 1961 address entry_checkcast_arraycopy;
1949 1962
1950 1963 StubRoutines::_arrayof_jbyte_disjoint_arraycopy =
1951 1964 generate_disjoint_copy(T_BYTE, true, Address::times_1, &entry,
1952 1965 "arrayof_jbyte_disjoint_arraycopy");
1953 1966 StubRoutines::_arrayof_jbyte_arraycopy =
1954 1967 generate_conjoint_copy(T_BYTE, true, Address::times_1, entry,
1955 1968 NULL, "arrayof_jbyte_arraycopy");
1956 1969 StubRoutines::_jbyte_disjoint_arraycopy =
1957 1970 generate_disjoint_copy(T_BYTE, false, Address::times_1, &entry,
1958 1971 "jbyte_disjoint_arraycopy");
1959 1972 StubRoutines::_jbyte_arraycopy =
1960 1973 generate_conjoint_copy(T_BYTE, false, Address::times_1, entry,
1961 1974 &entry_jbyte_arraycopy, "jbyte_arraycopy");
1962 1975
1963 1976 StubRoutines::_arrayof_jshort_disjoint_arraycopy =
1964 1977 generate_disjoint_copy(T_SHORT, true, Address::times_2, &entry,
1965 1978 "arrayof_jshort_disjoint_arraycopy");
1966 1979 StubRoutines::_arrayof_jshort_arraycopy =
1967 1980 generate_conjoint_copy(T_SHORT, true, Address::times_2, entry,
1968 1981 NULL, "arrayof_jshort_arraycopy");
1969 1982 StubRoutines::_jshort_disjoint_arraycopy =
1970 1983 generate_disjoint_copy(T_SHORT, false, Address::times_2, &entry,
1971 1984 "jshort_disjoint_arraycopy");
1972 1985 StubRoutines::_jshort_arraycopy =
1973 1986 generate_conjoint_copy(T_SHORT, false, Address::times_2, entry,
1974 1987 &entry_jshort_arraycopy, "jshort_arraycopy");
1975 1988
1976 1989     // The following arrays are always aligned on at least a 4-byte boundary.
1977 1990 StubRoutines::_jint_disjoint_arraycopy =
1978 1991 generate_disjoint_copy(T_INT, true, Address::times_4, &entry,
1979 1992 "jint_disjoint_arraycopy");
1980 1993 StubRoutines::_jint_arraycopy =
1981 1994 generate_conjoint_copy(T_INT, true, Address::times_4, entry,
1982 1995 &entry_jint_arraycopy, "jint_arraycopy");
1983 1996
1984 1997 StubRoutines::_oop_disjoint_arraycopy =
1985 1998 generate_disjoint_copy(T_OBJECT, true, Address::times_ptr, &entry,
1986 1999 "oop_disjoint_arraycopy");
1987 2000 StubRoutines::_oop_arraycopy =
1988 2001 generate_conjoint_copy(T_OBJECT, true, Address::times_ptr, entry,
1989 2002 &entry_oop_arraycopy, "oop_arraycopy");
1990 2003
1991 2004 StubRoutines::_jlong_disjoint_arraycopy =
1992 2005 generate_disjoint_long_copy(&entry, "jlong_disjoint_arraycopy");
1993 2006 StubRoutines::_jlong_arraycopy =
1994 2007 generate_conjoint_long_copy(entry, &entry_jlong_arraycopy,
1995 2008 "jlong_arraycopy");
1996 2009
1997 2010 StubRoutines::_arrayof_jint_disjoint_arraycopy =
1998 2011 StubRoutines::_jint_disjoint_arraycopy;
1999 2012 StubRoutines::_arrayof_oop_disjoint_arraycopy =
2000 2013 StubRoutines::_oop_disjoint_arraycopy;
2001 2014 StubRoutines::_arrayof_jlong_disjoint_arraycopy =
2002 2015 StubRoutines::_jlong_disjoint_arraycopy;
2003 2016
2004 2017 StubRoutines::_arrayof_jint_arraycopy = StubRoutines::_jint_arraycopy;
2005 2018 StubRoutines::_arrayof_oop_arraycopy = StubRoutines::_oop_arraycopy;
2006 2019 StubRoutines::_arrayof_jlong_arraycopy = StubRoutines::_jlong_arraycopy;
2007 2020
2008 2021 StubRoutines::_checkcast_arraycopy =
2009 2022 generate_checkcast_copy("checkcast_arraycopy",
2010 2023 &entry_checkcast_arraycopy);
2011 2024
2012 2025 StubRoutines::_unsafe_arraycopy =
2013 2026 generate_unsafe_copy("unsafe_arraycopy",
2014 2027 entry_jbyte_arraycopy,
2015 2028 entry_jshort_arraycopy,
2016 2029 entry_jint_arraycopy,
2017 2030 entry_jlong_arraycopy);
2018 2031
2019 2032 StubRoutines::_generic_arraycopy =
2020 2033 generate_generic_copy("generic_arraycopy",
2021 2034 entry_jbyte_arraycopy,
2022 2035 entry_jshort_arraycopy,
2023 2036 entry_jint_arraycopy,
2024 2037 entry_oop_arraycopy,
2025 2038 entry_jlong_arraycopy,
2026 2039 entry_checkcast_arraycopy);
2027 2040 }
2028 2041
2029 2042 void generate_math_stubs() {
2030 2043 {
2031 2044 StubCodeMark mark(this, "StubRoutines", "log");
2032 2045 StubRoutines::_intrinsic_log = (double (*)(double)) __ pc();
2033 2046
2034 2047 __ fld_d(Address(rsp, 4));
2035 2048 __ flog();
2036 2049 __ ret(0);
2037 2050 }
2038 2051 {
2039 2052 StubCodeMark mark(this, "StubRoutines", "log10");
2040 2053 StubRoutines::_intrinsic_log10 = (double (*)(double)) __ pc();
2041 2054
2042 2055 __ fld_d(Address(rsp, 4));
2043 2056 __ flog10();
2044 2057 __ ret(0);
2045 2058 }
2046 2059 {
2047 2060 StubCodeMark mark(this, "StubRoutines", "sin");
2048 2061 StubRoutines::_intrinsic_sin = (double (*)(double)) __ pc();
2049 2062
2050 2063 __ fld_d(Address(rsp, 4));
2051 2064 __ trigfunc('s');
2052 2065 __ ret(0);
2053 2066 }
2054 2067 {
2055 2068 StubCodeMark mark(this, "StubRoutines", "cos");
2056 2069 StubRoutines::_intrinsic_cos = (double (*)(double)) __ pc();
2057 2070
2058 2071 __ fld_d(Address(rsp, 4));
2059 2072 __ trigfunc('c');
2060 2073 __ ret(0);
2061 2074 }
2062 2075 {
2063 2076 StubCodeMark mark(this, "StubRoutines", "tan");
2064 2077 StubRoutines::_intrinsic_tan = (double (*)(double)) __ pc();
2065 2078
2066 2079 __ fld_d(Address(rsp, 4));
2067 2080 __ trigfunc('t');
2068 2081 __ ret(0);
2069 2082 }
2070 2083
2071 2084     // The intrinsic versions of these seem to return the same values as
2072 2085     // the strict versions.
2073 2086 StubRoutines::_intrinsic_exp = SharedRuntime::dexp;
2074 2087 StubRoutines::_intrinsic_pow = SharedRuntime::dpow;
2075 2088 }
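            // Reader's note: the stubs above rely on the 32-bit C calling convention
            // for 'double f(double)': the argument sits at 4(rsp), just above the
            // return address, and the result is returned in x87 ST(0), which is
            // where flog/flog10/trigfunc leave it, so a bare ret(0) suffices.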
2076 2089
2077 2090 public:
2078 2091 // Information about frame layout at time of blocking runtime call.
2079 2092 // Note that we only have to preserve callee-saved registers since
2080 2093 // the compilers are responsible for supplying a continuation point
2081 2094 // if they expect all registers to be preserved.
2082 2095 enum layout {
2083 2096 thread_off, // last_java_sp
2084 2097 rbp_off, // callee saved register
2085 2098 ret_pc,
2086 2099 framesize
2087 2100 };
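            // Reader's note: from low to high addresses the resulting frame is
            // [ thread arg | saved rbp | return pc ]; generate_throw_exception
            // subtracts only (framesize-2)*wordSize because the return pc and rbp
            // are already on the stack when the prolog runs.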
2088 2101
2089 2102 private:
2090 2103
2091 2104 #undef __
2092 2105 #define __ masm->
2093 2106
2094 2107 //------------------------------------------------------------------------------------------------------------------------
2095 2108 // Continuation point for throwing of implicit exceptions that are not handled in
2096 2109 // the current activation. Fabricates an exception oop and initiates normal
2097 2110 // exception dispatching in this frame.
2098 2111 //
2099 2112 // Previously the compiler (c2) allowed for callee save registers on Java calls.
2100 2113 // This is no longer true after adapter frames were removed but could possibly
2101 2114 // be brought back in the future if the interpreter code was reworked and it
2102 2115 // was deemed worthwhile. The comment below was left to describe what must
2103 2116 // happen here if callee saves were resurrected. As it stands now this stub
2104 2117 // could actually be a vanilla BufferBlob and have now oopMap at all.
2105 2118 // Since it doesn't make much difference we've chosen to leave it the
2106 2119 // way it was in the callee save days and keep the comment.
2107 2120
2108 2121 // If we need to preserve callee-saved values we need a callee-saved oop map and
2109 2122 // therefore have to make these stubs into RuntimeStubs rather than BufferBlobs.
2110 2123 // If the compiler needs all registers to be preserved between the fault
2111 2124 // point and the exception handler then it must assume responsibility for that in
2112 2125 // AbstractCompiler::continuation_for_implicit_null_exception or
2113 2126 // continuation_for_implicit_division_by_zero_exception. All other implicit
2114 2127 // exceptions (e.g., NullPointerException or AbstractMethodError on entry) are
2115 2128 // either at call sites or otherwise assume that stack unwinding will be initiated,
2116 2129 // so caller saved registers were assumed volatile in the compiler.
2117 2130 address generate_throw_exception(const char* name, address runtime_entry,
2118 2131 bool restore_saved_exception_pc) {
2119 2132
2120 2133 int insts_size = 256;
2121 2134 int locs_size = 32;
2122 2135
2123 2136 CodeBuffer code(name, insts_size, locs_size);
2124 2137 OopMapSet* oop_maps = new OopMapSet();
2125 2138 MacroAssembler* masm = new MacroAssembler(&code);
2126 2139
2127 2140 address start = __ pc();
2128 2141
2129 2142 // This is an inlined and slightly modified version of call_VM
2130 2143 // which has the ability to fetch the return PC out of
2131 2144 // thread-local storage and also sets up last_Java_sp slightly
2132 2145 // differently than the real call_VM
2133 2146 Register java_thread = rbx;
2134 2147 __ get_thread(java_thread);
2135 2148 if (restore_saved_exception_pc) {
2136 2149 __ movptr(rax, Address(java_thread, in_bytes(JavaThread::saved_exception_pc_offset())));
2137 2150 __ push(rax);
2138 2151 }
2139 2152
2140 2153 __ enter(); // required for proper stackwalking of RuntimeStub frame
2141 2154
2142 2155 // pc and rbp, already pushed
2143 2156 __ subptr(rsp, (framesize-2) * wordSize); // prolog
2144 2157
2145 2158 // Frame is now completed as far as size and linkage.
2146 2159
2147 2160 int frame_complete = __ pc() - start;
2148 2161
2149 2162 // push java thread (becomes first argument of C function)
2150 2163 __ movptr(Address(rsp, thread_off * wordSize), java_thread);
2151 2164
2152 2165 // Set up last_Java_sp and last_Java_fp
2153 2166 __ set_last_Java_frame(java_thread, rsp, rbp, NULL);
2154 2167
2155 2168 // Call runtime
2156 2169 BLOCK_COMMENT("call runtime_entry");
2157 2170 __ call(RuntimeAddress(runtime_entry));
2158 2171 // Generate oop map
2159 2172 OopMap* map = new OopMap(framesize, 0);
2160 2173 oop_maps->add_gc_map(__ pc() - start, map);
2161 2174
2162 2175 // restore the thread (cannot use the pushed argument since arguments
2163 2176 // may be overwritten by C code generated by an optimizing compiler);
2164 2177     // however, we can use the register value directly if it is callee-saved.
2165 2178 __ get_thread(java_thread);
2166 2179
2167 2180 __ reset_last_Java_frame(java_thread, true, false);
2168 2181
2169 2182 __ leave(); // required for proper stackwalking of RuntimeStub frame
2170 2183
2171 2184 // check for pending exceptions
2172 2185 #ifdef ASSERT
2173 2186 Label L;
2174 2187 __ cmpptr(Address(java_thread, Thread::pending_exception_offset()), (int32_t)NULL_WORD);
2175 2188 __ jcc(Assembler::notEqual, L);
2176 2189 __ should_not_reach_here();
2177 2190 __ bind(L);
2178 2191 #endif /* ASSERT */
2179 2192 __ jump(RuntimeAddress(StubRoutines::forward_exception_entry()));
2180 2193
2181 2194
2182 2195 RuntimeStub* stub = RuntimeStub::new_runtime_stub(name, &code, frame_complete, framesize, oop_maps, false);
2183 2196 return stub->entry_point();
2184 2197 }
2185 2198
2186 2199
2187 2200 void create_control_words() {
2188 2201 // Round to nearest, 53-bit mode, exceptions masked
2189 2202 StubRoutines::_fpu_cntrl_wrd_std = 0x027F;
2190 2203     // Round to zero, 53-bit mode, exceptions masked
2191 2204 StubRoutines::_fpu_cntrl_wrd_trunc = 0x0D7F;
2192 2205 // Round to nearest, 24-bit mode, exceptions masked
2193 2206 StubRoutines::_fpu_cntrl_wrd_24 = 0x007F;
2194 2207 // Round to nearest, 64-bit mode, exceptions masked
2195 2208 StubRoutines::_fpu_cntrl_wrd_64 = 0x037F;
2196 2209     // Round to nearest, all SSE exceptions masked (MXCSR has no precision control)
2197 2210 StubRoutines::_mxcsr_std = 0x1F80;
2198 2211     // Note: the following two constants are 80-bit values;
2199 2212     // their layout is critical for correct loading by the FPU.
2200 2213 // Bias for strict fp multiply/divide
2201 2214 StubRoutines::_fpu_subnormal_bias1[0]= 0x00000000; // 2^(-15360) == 0x03ff 8000 0000 0000 0000
2202 2215 StubRoutines::_fpu_subnormal_bias1[1]= 0x80000000;
2203 2216 StubRoutines::_fpu_subnormal_bias1[2]= 0x03ff;
2204 2217 // Un-Bias for strict fp multiply/divide
2205 2218 StubRoutines::_fpu_subnormal_bias2[0]= 0x00000000; // 2^(+15360) == 0x7bff 8000 0000 0000 0000
2206 2219 StubRoutines::_fpu_subnormal_bias2[1]= 0x80000000;
2207 2220 StubRoutines::_fpu_subnormal_bias2[2]= 0x7bff;
2208 2221 }
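            // Reader's decode of _fpu_cntrl_wrd_std == 0x027F (x87 control word:
            // bits 0-5 exception masks, bits 8-9 precision, bits 10-11 rounding):
            //   0x027F = 0000 0010 0111 1111b -> all six masks set,
            //            PC = 10b (53-bit), RC = 00b (round to nearest).
            // Likewise _mxcsr_std == 0x1F80 sets the six SSE exception-mask bits
            // (bits 7-12) with RC bits 13-14 == 00b (round to nearest).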
2209 2222
2210 2223 //---------------------------------------------------------------------------
2211 2224 // Initialization
2212 2225
2213 2226 void generate_initial() {
2214 2227 // Generates all stubs and initializes the entry points
2215 2228
2216 2229 //------------------------------------------------------------------------------------------------------------------------
2217 2230 // entry points that exist in all platforms
2218 2231 // Note: This is code that could be shared among different platforms - however the benefit seems to be smaller than
2219 2232 // the disadvantage of having a much more complicated generator structure. See also comment in stubRoutines.hpp.
2220 2233 StubRoutines::_forward_exception_entry = generate_forward_exception();
2221 2234
2222 2235 StubRoutines::_call_stub_entry =
2223 2236 generate_call_stub(StubRoutines::_call_stub_return_address);
2224 2237 // is referenced by megamorphic call
2225 2238 StubRoutines::_catch_exception_entry = generate_catch_exception();
2226 2239
2227 2240 // These are currently used by Solaris/Intel
2228 2241 StubRoutines::_atomic_xchg_entry = generate_atomic_xchg();
2229 2242
2230 2243 StubRoutines::_handler_for_unsafe_access_entry =
2231 2244 generate_handler_for_unsafe_access();
2232 2245
2233 2246 // platform dependent
2234 2247 create_control_words();
2235 2248
2236 2249 StubRoutines::x86::_verify_mxcsr_entry = generate_verify_mxcsr();
2237 2250 StubRoutines::x86::_verify_fpu_cntrl_wrd_entry = generate_verify_fpu_cntrl_wrd();
2238 2251 StubRoutines::_d2i_wrapper = generate_d2i_wrapper(T_INT,
2239 2252 CAST_FROM_FN_PTR(address, SharedRuntime::d2i));
2240 2253 StubRoutines::_d2l_wrapper = generate_d2i_wrapper(T_LONG,
2241 2254 CAST_FROM_FN_PTR(address, SharedRuntime::d2l));
2242 2255 }
2243 2256
2244 2257
2245 2258 void generate_all() {
2246 2259 // Generates all stubs and initializes the entry points
2247 2260
2248 2261 // These entry points require SharedInfo::stack0 to be set up in non-core builds
2249 2262 // and need to be relocatable, so they each fabricate a RuntimeStub internally.
2250 2263 StubRoutines::_throw_AbstractMethodError_entry = generate_throw_exception("AbstractMethodError throw_exception", CAST_FROM_FN_PTR(address, SharedRuntime::throw_AbstractMethodError), false);
2251 2264 StubRoutines::_throw_IncompatibleClassChangeError_entry= generate_throw_exception("IncompatibleClassChangeError throw_exception", CAST_FROM_FN_PTR(address, SharedRuntime::throw_IncompatibleClassChangeError), false);
2252 2265 StubRoutines::_throw_ArithmeticException_entry = generate_throw_exception("ArithmeticException throw_exception", CAST_FROM_FN_PTR(address, SharedRuntime::throw_ArithmeticException), true);
2253 2266 StubRoutines::_throw_NullPointerException_entry = generate_throw_exception("NullPointerException throw_exception", CAST_FROM_FN_PTR(address, SharedRuntime::throw_NullPointerException), true);
2254 2267 StubRoutines::_throw_NullPointerException_at_call_entry= generate_throw_exception("NullPointerException at call throw_exception", CAST_FROM_FN_PTR(address, SharedRuntime::throw_NullPointerException_at_call), false);
2255 2268 StubRoutines::_throw_StackOverflowError_entry = generate_throw_exception("StackOverflowError throw_exception", CAST_FROM_FN_PTR(address, SharedRuntime::throw_StackOverflowError), false);
2256 2269
2257 2270 //------------------------------------------------------------------------------------------------------------------------
2258 2271 // entry points that are platform specific
2259 2272
2260 2273 // support for verify_oop (must happen after universe_init)
2261 2274 StubRoutines::_verify_oop_subroutine_entry = generate_verify_oop();
2262 2275
2263 2276 // arraycopy stubs used by compilers
2264 2277 generate_arraycopy_stubs();
2265 2278
2266 2279 // generic method handle stubs
2267 2280 if (EnableMethodHandles && SystemDictionary::MethodHandle_klass() != NULL) {
2268 2281 for (MethodHandles::EntryKind ek = MethodHandles::_EK_FIRST;
2269 2282 ek < MethodHandles::_EK_LIMIT;
2270 2283 ek = MethodHandles::EntryKind(1 + (int)ek)) {
2271 2284 StubCodeMark mark(this, "MethodHandle", MethodHandles::entry_name(ek));
2272 2285 MethodHandles::generate_method_handle_stub(_masm, ek);
2273 2286 }
2274 2287 }
2275 2288
2276 2289 generate_math_stubs();
2277 2290 }
2278 2291
2279 2292
2280 2293 public:
2281 2294 StubGenerator(CodeBuffer* code, bool all) : StubCodeGenerator(code) {
2282 2295 if (all) {
2283 2296 generate_all();
2284 2297 } else {
2285 2298 generate_initial();
2286 2299 }
2287 2300 }
2288 2301 }; // end class declaration
2289 2302
2290 2303
2291 2304 void StubGenerator_generate(CodeBuffer* code, bool all) {
2292 2305 StubGenerator g(code, all);
2293 2306 }