--- old/src/cpu/x86/vm/stubGenerator_x86_32.cpp
+++ new/src/cpu/x86/vm/stubGenerator_x86_32.cpp
1 1 /*
2 - * Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved.
2 + * Copyright (c) 1999, 2011, Oracle and/or its affiliates. All rights reserved.
3 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 4 *
5 5 * This code is free software; you can redistribute it and/or modify it
6 6 * under the terms of the GNU General Public License version 2 only, as
7 7 * published by the Free Software Foundation.
8 8 *
9 9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 12 * version 2 for more details (a copy is included in the LICENSE file that
13 13 * accompanied this code).
14 14 *
15 15 * You should have received a copy of the GNU General Public License version
16 16 * 2 along with this work; if not, write to the Free Software Foundation,
17 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 18 *
19 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 20 * or visit www.oracle.com if you need additional information or have any
21 21 * questions.
22 22 *
23 23 */
24 24
25 25 #include "precompiled.hpp"
26 26 #include "asm/assembler.hpp"
27 27 #include "assembler_x86.inline.hpp"
28 28 #include "interpreter/interpreter.hpp"
29 29 #include "nativeInst_x86.hpp"
30 30 #include "oops/instanceOop.hpp"
31 31 #include "oops/methodOop.hpp"
32 32 #include "oops/objArrayKlass.hpp"
33 33 #include "oops/oop.inline.hpp"
34 34 #include "prims/methodHandles.hpp"
35 35 #include "runtime/frame.inline.hpp"
36 36 #include "runtime/handles.inline.hpp"
37 37 #include "runtime/sharedRuntime.hpp"
38 38 #include "runtime/stubCodeGenerator.hpp"
39 39 #include "runtime/stubRoutines.hpp"
40 40 #include "utilities/top.hpp"
41 41 #ifdef TARGET_OS_FAMILY_linux
42 42 # include "thread_linux.inline.hpp"
43 43 #endif
44 44 #ifdef TARGET_OS_FAMILY_solaris
45 45 # include "thread_solaris.inline.hpp"
46 46 #endif
47 47 #ifdef TARGET_OS_FAMILY_windows
48 48 # include "thread_windows.inline.hpp"
49 49 #endif
50 50 #ifdef COMPILER2
51 51 #include "opto/runtime.hpp"
52 52 #endif
53 53
54 54 // Declaration and definition of StubGenerator (no .hpp file).
55 55 // For a more detailed description of the stub routine structure
56 56 // see the comment in stubRoutines.hpp
57 57
58 58 #define __ _masm->
59 59 #define a__ ((Assembler*)_masm)->
60 60
61 61 #ifdef PRODUCT
62 62 #define BLOCK_COMMENT(str) /* nothing */
63 63 #else
64 64 #define BLOCK_COMMENT(str) __ block_comment(str)
65 65 #endif
66 66
67 67 #define BIND(label) bind(label); BLOCK_COMMENT(#label ":")
68 68
69 69 const int MXCSR_MASK = 0xFFC0; // Mask out any pending exceptions
70 70 const int FPU_CNTRL_WRD_MASK = 0xFFFF;
71 71
72 72 // -------------------------------------------------------------------------------------------------------------------------
73 73 // Stub Code definitions
74 74
75 75 static address handle_unsafe_access() {
76 76 JavaThread* thread = JavaThread::current();
77 77 address pc = thread->saved_exception_pc();
78 78 // pc is the instruction which we must emulate
79 79 // doing a no-op is fine: return garbage from the load
80 80 // therefore, compute npc
81 81 address npc = Assembler::locate_next_instruction(pc);
82 82
83 83 // request an async exception
84 84 thread->set_pending_unsafe_access_error();
85 85
86 86 // return address of next instruction to execute
87 87 return npc;
88 88 }
89 89
90 90 class StubGenerator: public StubCodeGenerator {
91 91 private:
92 92
93 93 #ifdef PRODUCT
94 94 #define inc_counter_np(counter) (0)
95 95 #else
96 96 void inc_counter_np_(int& counter) {
97 97 __ incrementl(ExternalAddress((address)&counter));
98 98 }
99 99 #define inc_counter_np(counter) \
100 100 BLOCK_COMMENT("inc_counter " #counter); \
101 101 inc_counter_np_(counter);
102 102 #endif //PRODUCT
103 103
104 104 void inc_copy_counter_np(BasicType t) {
105 105 #ifndef PRODUCT
106 106 switch (t) {
107 107 case T_BYTE: inc_counter_np(SharedRuntime::_jbyte_array_copy_ctr); return;
108 108 case T_SHORT: inc_counter_np(SharedRuntime::_jshort_array_copy_ctr); return;
109 109 case T_INT: inc_counter_np(SharedRuntime::_jint_array_copy_ctr); return;
110 110 case T_LONG: inc_counter_np(SharedRuntime::_jlong_array_copy_ctr); return;
111 111 case T_OBJECT: inc_counter_np(SharedRuntime::_oop_array_copy_ctr); return;
112 112 }
113 113 ShouldNotReachHere();
114 114 #endif //PRODUCT
115 115 }
116 116
117 117 //------------------------------------------------------------------------------------------------------------------------
118 118 // Call stubs are used to call Java from C
119 119 //
120 120 // [ return_from_Java ] <--- rsp
121 121 // [ argument word n ]
122 122 // ...
123 123 // -N [ argument word 1 ]
124 124 // -7 [ Possible padding for stack alignment ]
125 125 // -6 [ Possible padding for stack alignment ]
126 126 // -5 [ Possible padding for stack alignment ]
127 127 // -4 [ mxcsr save ] <--- rsp_after_call
128 128 // -3 [ saved rbx, ]
129 129 // -2 [ saved rsi ]
130 130 // -1 [ saved rdi ]
131 131 // 0 [ saved rbp, ] <--- rbp,
132 132 // 1 [ return address ]
133 133 // 2 [ ptr. to call wrapper ]
134 134 // 3 [ result ]
135 135 // 4 [ result_type ]
136 136 // 5 [ method ]
137 137 // 6 [ entry_point ]
138 138 // 7 [ parameters ]
139 139 // 8 [ parameter_size ]
140 140 // 9 [ thread ]
141 141
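For orientation, the rbp-relative slots diagrammed above can be pictured as a plain C struct. This is a sketch only (not HotSpot source), assuming 32-bit words, with field order matching the diagram:

    struct CallStubArgs {            // rbp-relative slot
      void*     saved_rbp;           //  0  <--- rbp
      void*     return_address;      //  1
      void*     call_wrapper;        //  2  ptr. to call wrapper
      intptr_t* result;              //  3
      int       result_type;         //  4  (a BasicType)
      void*     method;              //  5  (methodOop)
      void*     entry_point;         //  6
      intptr_t* parameters;          //  7
      int       parameter_size;      //  8  (in words)
      void*     thread;              //  9  (JavaThread*)
    };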
142 142
143 143 address generate_call_stub(address& return_address) {
144 144 StubCodeMark mark(this, "StubRoutines", "call_stub");
145 145 address start = __ pc();
146 146
147 147 // stub code parameters / addresses
148 148 assert(frame::entry_frame_call_wrapper_offset == 2, "adjust this code");
149 149 bool sse_save = false;
150 150 const Address rsp_after_call(rbp, -4 * wordSize); // same as in generate_catch_exception()!
151 151 const int locals_count_in_bytes (4*wordSize);
152 152 const Address mxcsr_save (rbp, -4 * wordSize);
153 153 const Address saved_rbx (rbp, -3 * wordSize);
154 154 const Address saved_rsi (rbp, -2 * wordSize);
155 155 const Address saved_rdi (rbp, -1 * wordSize);
156 156 const Address result (rbp, 3 * wordSize);
157 157 const Address result_type (rbp, 4 * wordSize);
158 158 const Address method (rbp, 5 * wordSize);
159 159 const Address entry_point (rbp, 6 * wordSize);
160 160 const Address parameters (rbp, 7 * wordSize);
161 161 const Address parameter_size(rbp, 8 * wordSize);
162 162 const Address thread (rbp, 9 * wordSize); // same as in generate_catch_exception()!
163 163 sse_save = UseSSE > 0;
164 164
165 165 // stub code
166 166 __ enter();
167 167 __ movptr(rcx, parameter_size); // parameter counter
168 168 __ shlptr(rcx, Interpreter::logStackElementSize); // convert parameter count to bytes
169 169 __ addptr(rcx, locals_count_in_bytes); // reserve space for register saves
170 170 __ subptr(rsp, rcx);
171 171 __ andptr(rsp, -(StackAlignmentInBytes)); // Align stack
172 172
173 173 // save rdi, rsi, & rbx, according to C calling conventions
174 174 __ movptr(saved_rdi, rdi);
175 175 __ movptr(saved_rsi, rsi);
176 176 __ movptr(saved_rbx, rbx);
177 177 // save and initialize %mxcsr
178 178 if (sse_save) {
179 179 Label skip_ldmx;
180 180 __ stmxcsr(mxcsr_save);
181 181 __ movl(rax, mxcsr_save);
182 182 __ andl(rax, MXCSR_MASK); // Only check control and mask bits
183 183 ExternalAddress mxcsr_std(StubRoutines::addr_mxcsr_std());
184 184 __ cmp32(rax, mxcsr_std);
185 185 __ jcc(Assembler::equal, skip_ldmx);
186 186 __ ldmxcsr(mxcsr_std);
187 187 __ bind(skip_ldmx);
188 188 }
189 189
190 190 // make sure the control word is correct.
191 191 __ fldcw(ExternalAddress(StubRoutines::addr_fpu_cntrl_wrd_std()));
192 192
193 193 #ifdef ASSERT
194 194 // make sure we have no pending exceptions
195 195 { Label L;
196 196 __ movptr(rcx, thread);
197 197 __ cmpptr(Address(rcx, Thread::pending_exception_offset()), (int32_t)NULL_WORD);
198 198 __ jcc(Assembler::equal, L);
199 199 __ stop("StubRoutines::call_stub: entered with pending exception");
200 200 __ bind(L);
201 201 }
202 202 #endif
203 203
204 204 // pass parameters if any
205 205 BLOCK_COMMENT("pass parameters if any");
206 206 Label parameters_done;
207 207 __ movl(rcx, parameter_size); // parameter counter
208 208 __ testl(rcx, rcx);
209 209 __ jcc(Assembler::zero, parameters_done);
210 210
211 211 // parameter passing loop
212 212
213 213 Label loop;
214 214 // Copy Java parameters in reverse order (receiver last)
215 215 // Note that the argument order is inverted in the process
216 216 // source is rdx[rcx: N-1..0]
217 217 // dest is rsp[rbx: 0..N-1]
218 218
219 219 __ movptr(rdx, parameters); // parameter pointer
220 220 __ xorptr(rbx, rbx);
221 221
222 222 __ BIND(loop);
223 223
224 224 // get parameter
225 225 __ movptr(rax, Address(rdx, rcx, Interpreter::stackElementScale(), -wordSize));
226 226 __ movptr(Address(rsp, rbx, Interpreter::stackElementScale(),
227 227 Interpreter::expr_offset_in_bytes(0)), rax); // store parameter
228 228 __ increment(rbx);
229 229 __ decrement(rcx);
230 230 __ jcc(Assembler::notZero, loop);
231 231
232 232 // call Java function
233 233 __ BIND(parameters_done);
234 234 __ movptr(rbx, method); // get methodOop
235 235 __ movptr(rax, entry_point); // get entry_point
236 236 __ mov(rsi, rsp); // set sender sp
237 237 BLOCK_COMMENT("call Java function");
238 238 __ call(rax);
239 239
240 240 BLOCK_COMMENT("call_stub_return_address:");
241 241 return_address = __ pc();
242 242
243 243 Label common_return;
244 244
245 245 __ BIND(common_return);
246 246
247 247 // store result depending on type
248 248 // (everything that is not T_LONG, T_FLOAT or T_DOUBLE is treated as T_INT)
249 249 __ movptr(rdi, result);
250 250 Label is_long, is_float, is_double, exit;
251 251 __ movl(rsi, result_type);
252 252 __ cmpl(rsi, T_LONG);
253 253 __ jcc(Assembler::equal, is_long);
254 254 __ cmpl(rsi, T_FLOAT);
255 255 __ jcc(Assembler::equal, is_float);
256 256 __ cmpl(rsi, T_DOUBLE);
257 257 __ jcc(Assembler::equal, is_double);
258 258
259 259 // handle T_INT case
260 260 __ movl(Address(rdi, 0), rax);
261 261 __ BIND(exit);
262 262
263 263 // check that FPU stack is empty
264 264 __ verify_FPU(0, "generate_call_stub");
265 265
266 266 // pop parameters
267 267 __ lea(rsp, rsp_after_call);
268 268
269 269 // restore %mxcsr
270 270 if (sse_save) {
271 271 __ ldmxcsr(mxcsr_save);
272 272 }
273 273
274 274 // restore rdi, rsi and rbx,
275 275 __ movptr(rbx, saved_rbx);
276 276 __ movptr(rsi, saved_rsi);
277 277 __ movptr(rdi, saved_rdi);
278 278 __ addptr(rsp, 4*wordSize);
279 279
280 280 // return
281 281 __ pop(rbp);
282 282 __ ret(0);
283 283
284 284 // handle return types different from T_INT
285 285 __ BIND(is_long);
286 286 __ movl(Address(rdi, 0 * wordSize), rax);
287 287 __ movl(Address(rdi, 1 * wordSize), rdx);
288 288 __ jmp(exit);
289 289
290 290 __ BIND(is_float);
291 291 // interpreter uses xmm0 for return values
292 292 if (UseSSE >= 1) {
293 293 __ movflt(Address(rdi, 0), xmm0);
294 294 } else {
295 295 __ fstp_s(Address(rdi, 0));
296 296 }
297 297 __ jmp(exit);
298 298
299 299 __ BIND(is_double);
300 300 // interpreter uses xmm0 for return values
301 301 if (UseSSE >= 2) {
302 302 __ movdbl(Address(rdi, 0), xmm0);
303 303 } else {
304 304 __ fstp_d(Address(rdi, 0));
305 305 }
306 306 __ jmp(exit);
307 307
308 308 // If we call compiled code directly from the call stub we will
309 309 // need to adjust the return back to the call stub to a specialized
 310  310       // piece of code that can handle compiled results and clean the fpu
 311  311       // stack. Compiled code will be set to return here instead of the
 312  312       // return above that handles interpreter returns.
313 313
314 314 BLOCK_COMMENT("call_stub_compiled_return:");
315 315 StubRoutines::x86::set_call_stub_compiled_return( __ pc());
316 316
317 317 #ifdef COMPILER2
318 318 if (UseSSE >= 2) {
319 319 __ verify_FPU(0, "call_stub_compiled_return");
320 320 } else {
321 321 for (int i = 1; i < 8; i++) {
322 322 __ ffree(i);
323 323 }
324 324
325 325 // UseSSE <= 1 so double result should be left on TOS
326 326 __ movl(rsi, result_type);
327 327 __ cmpl(rsi, T_DOUBLE);
328 328 __ jcc(Assembler::equal, common_return);
329 329 if (UseSSE == 0) {
330 330 // UseSSE == 0 so float result should be left on TOS
331 331 __ cmpl(rsi, T_FLOAT);
332 332 __ jcc(Assembler::equal, common_return);
333 333 }
334 334 __ ffree(0);
335 335 }
336 336 #endif /* COMPILER2 */
337 337 __ jmp(common_return);
338 338
339 339 return start;
340 340 }
341 341
342 342
343 343 //------------------------------------------------------------------------------------------------------------------------
344 344 // Return point for a Java call if there's an exception thrown in Java code.
345 345 // The exception is caught and transformed into a pending exception stored in
346 346 // JavaThread that can be tested from within the VM.
347 347 //
348 348 // Note: Usually the parameters are removed by the callee. In case of an exception
349 349 // crossing an activation frame boundary, that is not the case if the callee
 350  350     //       is compiled code => need to set up the rsp.
351 351 //
352 352 // rax,: exception oop
353 353
354 354 address generate_catch_exception() {
355 355 StubCodeMark mark(this, "StubRoutines", "catch_exception");
356 356 const Address rsp_after_call(rbp, -4 * wordSize); // same as in generate_call_stub()!
357 357 const Address thread (rbp, 9 * wordSize); // same as in generate_call_stub()!
358 358 address start = __ pc();
359 359
360 360 // get thread directly
361 361 __ movptr(rcx, thread);
362 362 #ifdef ASSERT
363 363 // verify that threads correspond
364 364 { Label L;
365 365 __ get_thread(rbx);
366 366 __ cmpptr(rbx, rcx);
367 367 __ jcc(Assembler::equal, L);
368 368 __ stop("StubRoutines::catch_exception: threads must correspond");
369 369 __ bind(L);
370 370 }
371 371 #endif
372 372 // set pending exception
373 373 __ verify_oop(rax);
374 374 __ movptr(Address(rcx, Thread::pending_exception_offset()), rax );
375 375 __ lea(Address(rcx, Thread::exception_file_offset ()),
376 376 ExternalAddress((address)__FILE__));
377 377 __ movl(Address(rcx, Thread::exception_line_offset ()), __LINE__ );
378 378 // complete return to VM
379 379 assert(StubRoutines::_call_stub_return_address != NULL, "_call_stub_return_address must have been generated before");
380 380 __ jump(RuntimeAddress(StubRoutines::_call_stub_return_address));
381 381
382 382 return start;
383 383 }
384 384
385 385
386 386 //------------------------------------------------------------------------------------------------------------------------
387 387 // Continuation point for runtime calls returning with a pending exception.
388 388 // The pending exception check happened in the runtime or native call stub.
389 389 // The pending exception in Thread is converted into a Java-level exception.
390 390 //
391 391 // Contract with Java-level exception handlers:
392 392 // rax: exception
393 393 // rdx: throwing pc
394 394 //
 395  395     // NOTE: On entry to this stub, the exception-pc must be on the stack!
396 396
397 397 address generate_forward_exception() {
398 398 StubCodeMark mark(this, "StubRoutines", "forward exception");
399 399 address start = __ pc();
400 400 const Register thread = rcx;
401 401
402 402 // other registers used in this stub
403 403 const Register exception_oop = rax;
404 404 const Register handler_addr = rbx;
405 405 const Register exception_pc = rdx;
406 406
407 407 // Upon entry, the sp points to the return address returning into Java
408 408 // (interpreted or compiled) code; i.e., the return address becomes the
409 409 // throwing pc.
410 410 //
411 411 // Arguments pushed before the runtime call are still on the stack but
412 412 // the exception handler will reset the stack pointer -> ignore them.
413 413 // A potential result in registers can be ignored as well.
414 414
415 415 #ifdef ASSERT
416 416 // make sure this code is only executed if there is a pending exception
417 417 { Label L;
418 418 __ get_thread(thread);
419 419 __ cmpptr(Address(thread, Thread::pending_exception_offset()), (int32_t)NULL_WORD);
420 420 __ jcc(Assembler::notEqual, L);
421 421 __ stop("StubRoutines::forward exception: no pending exception (1)");
422 422 __ bind(L);
423 423 }
424 424 #endif
425 425
426 426 // compute exception handler into rbx,
427 427 __ get_thread(thread);
428 428 __ movptr(exception_pc, Address(rsp, 0));
429 429 BLOCK_COMMENT("call exception_handler_for_return_address");
430 430 __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::exception_handler_for_return_address), thread, exception_pc);
431 431 __ mov(handler_addr, rax);
432 432
433 433 // setup rax & rdx, remove return address & clear pending exception
434 434 __ get_thread(thread);
435 435 __ pop(exception_pc);
436 436 __ movptr(exception_oop, Address(thread, Thread::pending_exception_offset()));
437 437 __ movptr(Address(thread, Thread::pending_exception_offset()), NULL_WORD);
438 438
439 439 #ifdef ASSERT
440 440 // make sure exception is set
441 441 { Label L;
442 442 __ testptr(exception_oop, exception_oop);
443 443 __ jcc(Assembler::notEqual, L);
444 444 __ stop("StubRoutines::forward exception: no pending exception (2)");
445 445 __ bind(L);
446 446 }
447 447 #endif
448 448
449 449 // Verify that there is really a valid exception in RAX.
450 450 __ verify_oop(exception_oop);
451 451
452 452 // Restore SP from BP if the exception PC is a MethodHandle call site.
453 453 __ cmpl(Address(thread, JavaThread::is_method_handle_return_offset()), 0);
454 454 __ cmovptr(Assembler::notEqual, rsp, rbp);
455 455
456 456 // continue at exception handler (return address removed)
457 457 // rax: exception
458 458 // rbx: exception handler
459 459 // rdx: throwing pc
460 460 __ jmp(handler_addr);
461 461
462 462 return start;
463 463 }
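Condensed, the stub's logic reads like the following pseudo-C sketch (names match the register roles above; this is illustration, not compilable code):

    // sketch of generate_forward_exception
    address throwing_pc = pop();   // return address into Java code becomes the throwing pc
    address handler     = SharedRuntime::exception_handler_for_return_address(thread, throwing_pc);
    oop     ex          = thread->pending_exception();
    thread->set_pending_exception(NULL);
    if (thread->is_method_handle_return()) rsp = rbp;  // restore SP for MH call sites
    goto *handler;                 // with rax = ex, rdx = throwing_pc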
464 464
465 465
466 466 //----------------------------------------------------------------------------------------------------
467 467 // Support for jint Atomic::xchg(jint exchange_value, volatile jint* dest)
468 468 //
469 469 // xchg exists as far back as 8086, lock needed for MP only
470 470 // Stack layout immediately after call:
471 471 //
472 472 // 0 [ret addr ] <--- rsp
473 473 // 1 [ ex ]
474 474 // 2 [ dest ]
475 475 //
476 476 // Result: *dest <- ex, return (old *dest)
477 477 //
478 478 // Note: win32 does not currently use this code
479 479
480 480 address generate_atomic_xchg() {
481 481 StubCodeMark mark(this, "StubRoutines", "atomic_xchg");
482 482 address start = __ pc();
483 483
484 484 __ push(rdx);
485 485 Address exchange(rsp, 2 * wordSize);
486 486 Address dest_addr(rsp, 3 * wordSize);
487 487 __ movl(rax, exchange);
488 488 __ movptr(rdx, dest_addr);
489 489 __ xchgl(rax, Address(rdx, 0));
490 490 __ pop(rdx);
491 491 __ ret(0);
492 492
493 493 return start;
494 494 }
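For reference, the semantics this stub implements can be sketched with a GCC builtin (__sync_lock_test_and_set compiles to xchg on x86); the real stub takes its arguments from the stack as diagrammed above:

    // sketch: atomically store exchange_value into *dest, return the old value
    jint atomic_xchg_sketch(jint exchange_value, volatile jint* dest) {
      return __sync_lock_test_and_set(dest, exchange_value);
    }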
495 495
496 496 //----------------------------------------------------------------------------------------------------
497 497 // Support for void verify_mxcsr()
498 498 //
499 499 // This routine is used with -Xcheck:jni to verify that native
500 500 // JNI code does not return to Java code without restoring the
501 501 // MXCSR register to our expected state.
502 502
503 503
504 504 address generate_verify_mxcsr() {
505 505 StubCodeMark mark(this, "StubRoutines", "verify_mxcsr");
506 506 address start = __ pc();
507 507
508 508 const Address mxcsr_save(rsp, 0);
509 509
510 510 if (CheckJNICalls && UseSSE > 0 ) {
511 511 Label ok_ret;
512 512 ExternalAddress mxcsr_std(StubRoutines::addr_mxcsr_std());
513 513 __ push(rax);
514 514 __ subptr(rsp, wordSize); // allocate a temp location
515 515 __ stmxcsr(mxcsr_save);
516 516 __ movl(rax, mxcsr_save);
517 517 __ andl(rax, MXCSR_MASK);
518 518 __ cmp32(rax, mxcsr_std);
519 519 __ jcc(Assembler::equal, ok_ret);
520 520
521 521 __ warn("MXCSR changed by native JNI code.");
522 522
523 523 __ ldmxcsr(mxcsr_std);
524 524
525 525 __ bind(ok_ret);
526 526 __ addptr(rsp, wordSize);
527 527 __ pop(rax);
528 528 }
529 529
530 530 __ ret(0);
531 531
532 532 return start;
533 533 }
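In C terms the check reduces to the sketch below; read_mxcsr()/write_mxcsr() are hypothetical stand-ins for the stmxcsr/ldmxcsr instructions, and expected_std stands for the word at StubRoutines::addr_mxcsr_std():

    uint32_t observed = read_mxcsr() & MXCSR_MASK;   // 0xFFC0 drops the six sticky flag bits
    if (observed != (expected_std & MXCSR_MASK)) {
      warn("MXCSR changed by native JNI code.");
      write_mxcsr(expected_std);                     // reload the expected state
    }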
534 534
535 535
536 536 //---------------------------------------------------------------------------
537 537 // Support for void verify_fpu_cntrl_wrd()
538 538 //
539 539 // This routine is used with -Xcheck:jni to verify that native
540 540 // JNI code does not return to Java code without restoring the
541 541 // FP control word to our expected state.
542 542
543 543 address generate_verify_fpu_cntrl_wrd() {
544 544 StubCodeMark mark(this, "StubRoutines", "verify_spcw");
545 545 address start = __ pc();
546 546
547 547 const Address fpu_cntrl_wrd_save(rsp, 0);
548 548
549 549 if (CheckJNICalls) {
550 550 Label ok_ret;
551 551 __ push(rax);
552 552 __ subptr(rsp, wordSize); // allocate a temp location
553 553 __ fnstcw(fpu_cntrl_wrd_save);
554 554 __ movl(rax, fpu_cntrl_wrd_save);
555 555 __ andl(rax, FPU_CNTRL_WRD_MASK);
556 556 ExternalAddress fpu_std(StubRoutines::addr_fpu_cntrl_wrd_std());
557 557 __ cmp32(rax, fpu_std);
558 558 __ jcc(Assembler::equal, ok_ret);
559 559
560 560 __ warn("Floating point control word changed by native JNI code.");
561 561
562 562 __ fldcw(fpu_std);
563 563
564 564 __ bind(ok_ret);
565 565 __ addptr(rsp, wordSize);
566 566 __ pop(rax);
567 567 }
568 568
569 569 __ ret(0);
570 570
571 571 return start;
572 572 }
573 573
574 574 //---------------------------------------------------------------------------
575 575 // Wrapper for slow-case handling of double-to-integer conversion
 576  576     // d2i or f2i fast case failed either because it is NaN or because
577 577 // of under/overflow.
578 578 // Input: FPU TOS: float value
579 579 // Output: rax, (rdx): integer (long) result
580 580
581 581 address generate_d2i_wrapper(BasicType t, address fcn) {
582 582 StubCodeMark mark(this, "StubRoutines", "d2i_wrapper");
583 583 address start = __ pc();
584 584
585 585 // Capture info about frame layout
586 586 enum layout { FPUState_off = 0,
587 587 rbp_off = FPUStateSizeInWords,
588 588 rdi_off,
589 589 rsi_off,
590 590 rcx_off,
591 591 rbx_off,
592 592 saved_argument_off,
593 593 saved_argument_off2, // 2nd half of double
594 594 framesize
595 595 };
596 596
597 597 assert(FPUStateSizeInWords == 27, "update stack layout");
598 598
599 599 // Save outgoing argument to stack across push_FPU_state()
600 600 __ subptr(rsp, wordSize * 2);
601 601 __ fstp_d(Address(rsp, 0));
602 602
603 603 // Save CPU & FPU state
604 604 __ push(rbx);
605 605 __ push(rcx);
606 606 __ push(rsi);
607 607 __ push(rdi);
608 608 __ push(rbp);
609 609 __ push_FPU_state();
610 610
611 611 // push_FPU_state() resets the FP top of stack
612 612 // Load original double into FP top of stack
613 613 __ fld_d(Address(rsp, saved_argument_off * wordSize));
614 614 // Store double into stack as outgoing argument
615 615 __ subptr(rsp, wordSize*2);
616 616 __ fst_d(Address(rsp, 0));
617 617
618 618 // Prepare FPU for doing math in C-land
619 619 __ empty_FPU_stack();
620 620 // Call the C code to massage the double. Result in EAX
621 621 if (t == T_INT)
622 622 { BLOCK_COMMENT("SharedRuntime::d2i"); }
623 623 else if (t == T_LONG)
624 624 { BLOCK_COMMENT("SharedRuntime::d2l"); }
625 625 __ call_VM_leaf( fcn, 2 );
626 626
627 627 // Restore CPU & FPU state
628 628 __ pop_FPU_state();
629 629 __ pop(rbp);
630 630 __ pop(rdi);
631 631 __ pop(rsi);
632 632 __ pop(rcx);
633 633 __ pop(rbx);
634 634 __ addptr(rsp, wordSize * 2);
635 635
636 636 __ ret(0);
637 637
638 638 return start;
639 639 }
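The slow path defers to C (SharedRuntime::d2i / d2l), which must implement the Java narrowing rules; a sketch of the T_INT case for orientation (jint/jdouble as in jni.h):

    jint d2i_sketch(jdouble x) {
      if (x != x)             return 0;                  // NaN converts to 0
      if (x >= 2147483647.0)  return 0x7fffffff;         // clamp to max_jint
      if (x <= -2147483648.0) return (jint)0x80000000;   // clamp to min_jint
      return (jint) x;                                   // in range: truncate toward zero
    }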
640 640
641 641
642 642 //---------------------------------------------------------------------------
643 643 // The following routine generates a subroutine to throw an asynchronous
644 644 // UnknownError when an unsafe access gets a fault that could not be
645 645 // reasonably prevented by the programmer. (Example: SIGBUS/OBJERR.)
646 646 address generate_handler_for_unsafe_access() {
647 647 StubCodeMark mark(this, "StubRoutines", "handler_for_unsafe_access");
648 648 address start = __ pc();
649 649
650 650 __ push(0); // hole for return address-to-be
651 651 __ pusha(); // push registers
652 652 Address next_pc(rsp, RegisterImpl::number_of_registers * BytesPerWord);
653 653 BLOCK_COMMENT("call handle_unsafe_access");
654 654 __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, handle_unsafe_access)));
655 655 __ movptr(next_pc, rax); // stuff next address
656 656 __ popa();
657 657 __ ret(0); // jump to next address
658 658
659 659 return start;
660 660 }
661 661
662 662
663 663 //----------------------------------------------------------------------------------------------------
664 664 // Non-destructive plausibility checks for oops
665 665
666 666 address generate_verify_oop() {
667 667 StubCodeMark mark(this, "StubRoutines", "verify_oop");
668 668 address start = __ pc();
669 669
670 670 // Incoming arguments on stack after saving rax,:
671 671 //
672 672 // [tos ]: saved rdx
673 673 // [tos + 1]: saved EFLAGS
674 674 // [tos + 2]: return address
675 675 // [tos + 3]: char* error message
676 676 // [tos + 4]: oop object to verify
677 677 // [tos + 5]: saved rax, - saved by caller and bashed
678 678
679 679 Label exit, error;
680 680 __ pushf();
681 681 __ incrementl(ExternalAddress((address) StubRoutines::verify_oop_count_addr()));
682 682 __ push(rdx); // save rdx
683 683 // make sure object is 'reasonable'
684 684 __ movptr(rax, Address(rsp, 4 * wordSize)); // get object
685 685 __ testptr(rax, rax);
686 686 __ jcc(Assembler::zero, exit); // if obj is NULL it is ok
687 687
688 688 // Check if the oop is in the right area of memory
689 689 const int oop_mask = Universe::verify_oop_mask();
690 690 const int oop_bits = Universe::verify_oop_bits();
691 691 __ mov(rdx, rax);
692 692 __ andptr(rdx, oop_mask);
693 693 __ cmpptr(rdx, oop_bits);
694 694 __ jcc(Assembler::notZero, error);
695 695
696 696 // make sure klass is 'reasonable'
697 697 __ movptr(rax, Address(rax, oopDesc::klass_offset_in_bytes())); // get klass
698 698 __ testptr(rax, rax);
699 699 __ jcc(Assembler::zero, error); // if klass is NULL it is broken
700 700
701 701 // Check if the klass is in the right area of memory
702 702 const int klass_mask = Universe::verify_klass_mask();
703 703 const int klass_bits = Universe::verify_klass_bits();
704 704 __ mov(rdx, rax);
705 705 __ andptr(rdx, klass_mask);
706 706 __ cmpptr(rdx, klass_bits);
707 707 __ jcc(Assembler::notZero, error);
708 708
709 709 // make sure klass' klass is 'reasonable'
710 710 __ movptr(rax, Address(rax, oopDesc::klass_offset_in_bytes())); // get klass' klass
711 711 __ testptr(rax, rax);
712 712 __ jcc(Assembler::zero, error); // if klass' klass is NULL it is broken
713 713
714 714 __ mov(rdx, rax);
715 715 __ andptr(rdx, klass_mask);
716 716 __ cmpptr(rdx, klass_bits);
717 717 __ jcc(Assembler::notZero, error); // if klass not in right area
718 718 // of memory it is broken too.
719 719
720 720 // return if everything seems ok
721 721 __ bind(exit);
722 722 __ movptr(rax, Address(rsp, 5 * wordSize)); // get saved rax, back
723 723 __ pop(rdx); // restore rdx
724 724 __ popf(); // restore EFLAGS
725 725 __ ret(3 * wordSize); // pop arguments
726 726
727 727 // handle errors
728 728 __ bind(error);
729 729 __ movptr(rax, Address(rsp, 5 * wordSize)); // get saved rax, back
730 730 __ pop(rdx); // get saved rdx back
731 731 __ popf(); // get saved EFLAGS off stack -- will be ignored
732 732 __ pusha(); // push registers (eip = return address & msg are already pushed)
733 733 BLOCK_COMMENT("call MacroAssembler::debug");
734 734 __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, MacroAssembler::debug32)));
735 735 __ popa();
736 736 __ ret(3 * wordSize); // pop arguments
737 737 return start;
738 738 }
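Both "right area of memory" tests above are one pattern applied first to the oop and then to its klass; in C (mask/bits come from Universe::verify_oop_mask()/verify_oop_bits() and the klass equivalents):

    // sketch: an address is plausible when its selected bits match the expected pattern
    static bool plausible(uintptr_t p, uintptr_t mask, uintptr_t bits) {
      return (p & mask) == bits;
    }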
739 739
740 740 //
741 741 // Generate pre-barrier for array stores
742 742 //
743 743 // Input:
744 744 // start - starting address
745 745 // count - element count
746 746 void gen_write_ref_array_pre_barrier(Register start, Register count) {
747 747 assert_different_registers(start, count);
748 748 BarrierSet* bs = Universe::heap()->barrier_set();
749 749 switch (bs->kind()) {
750 750 case BarrierSet::G1SATBCT:
751 751 case BarrierSet::G1SATBCTLogging:
752 752 {
753 753 __ pusha(); // push registers
754 754 __ call_VM_leaf(CAST_FROM_FN_PTR(address, BarrierSet::static_write_ref_array_pre),
755 755 start, count);
756 756 __ popa();
757 757 }
758 758 break;
759 759 case BarrierSet::CardTableModRef:
760 760 case BarrierSet::CardTableExtension:
761 761 case BarrierSet::ModRef:
762 762 break;
763 763 default :
764 764 ShouldNotReachHere();
765 765
766 766 }
767 767 }
768 768
769 769
770 770 //
771 771 // Generate a post-barrier for an array store
772 772 //
773 773 // start - starting address
774 774 // count - element count
775 775 //
776 776 // The two input registers are overwritten.
777 777 //
778 778 void gen_write_ref_array_post_barrier(Register start, Register count) {
779 779 BarrierSet* bs = Universe::heap()->barrier_set();
780 780 assert_different_registers(start, count);
781 781 switch (bs->kind()) {
782 782 case BarrierSet::G1SATBCT:
783 783 case BarrierSet::G1SATBCTLogging:
784 784 {
785 785 __ pusha(); // push registers
786 786 __ call_VM_leaf(CAST_FROM_FN_PTR(address, BarrierSet::static_write_ref_array_post),
787 787 start, count);
788 788 __ popa();
789 789 }
790 790 break;
791 791
792 792 case BarrierSet::CardTableModRef:
793 793 case BarrierSet::CardTableExtension:
794 794 {
795 795 CardTableModRefBS* ct = (CardTableModRefBS*)bs;
796 796 assert(sizeof(*ct->byte_map_base) == sizeof(jbyte), "adjust this code");
797 797
798 798 Label L_loop;
799 799 const Register end = count; // elements count; end == start+count-1
800 800 assert_different_registers(start, end);
801 801
802 802 __ lea(end, Address(start, count, Address::times_ptr, -wordSize));
803 803 __ shrptr(start, CardTableModRefBS::card_shift);
804 804 __ shrptr(end, CardTableModRefBS::card_shift);
805 805 __ subptr(end, start); // end --> count
806 806 __ BIND(L_loop);
807 807 intptr_t disp = (intptr_t) ct->byte_map_base;
808 808 Address cardtable(start, count, Address::times_1, disp);
809 809 __ movb(cardtable, 0);
810 810 __ decrement(count);
811 811 __ jcc(Assembler::greaterEqual, L_loop);
812 812 }
813 813 break;
814 814 case BarrierSet::ModRef:
815 815 break;
816 816 default :
817 817 ShouldNotReachHere();
818 818
819 819 }
820 820 }
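The CardTableModRef arm dirties one card byte per card spanned by the stored-to region; roughly, in C (a sketch, with card_shift and byte_map_base as in CardTableModRefBS):

    // mark every card covered by [start, start + bytes) as dirty (0)
    void card_mark_sketch(jbyte* byte_map_base, uintptr_t start, size_t bytes, int card_shift) {
      uintptr_t first = start >> card_shift;
      uintptr_t last  = (start + bytes - 1) >> card_shift;
      for (uintptr_t c = first; c <= last; c++) {
        byte_map_base[c] = 0;     // the collector rescans dirty cards later
      }
    }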
821 821
822 822
 823  823   // Copy 64-byte chunks
824 824 //
825 825 // Inputs:
826 826 // from - source array address
827 827 // to_from - destination array address - from
 828  828   //   qword_count - 8-byte element count
829 829 //
830 830 void xmm_copy_forward(Register from, Register to_from, Register qword_count) {
831 831 assert( UseSSE >= 2, "supported cpu only" );
832 832 Label L_copy_64_bytes_loop, L_copy_64_bytes, L_copy_8_bytes, L_exit;
833 833 // Copy 64-byte chunks
834 834 __ jmpb(L_copy_64_bytes);
835 835 __ align(OptoLoopAlignment);
836 836 __ BIND(L_copy_64_bytes_loop);
837 837
838 838 if(UseUnalignedLoadStores) {
839 839 __ movdqu(xmm0, Address(from, 0));
840 840 __ movdqu(Address(from, to_from, Address::times_1, 0), xmm0);
841 841 __ movdqu(xmm1, Address(from, 16));
842 842 __ movdqu(Address(from, to_from, Address::times_1, 16), xmm1);
843 843 __ movdqu(xmm2, Address(from, 32));
844 844 __ movdqu(Address(from, to_from, Address::times_1, 32), xmm2);
845 845 __ movdqu(xmm3, Address(from, 48));
846 846 __ movdqu(Address(from, to_from, Address::times_1, 48), xmm3);
847 847
848 848 } else {
849 849 __ movq(xmm0, Address(from, 0));
850 850 __ movq(Address(from, to_from, Address::times_1, 0), xmm0);
851 851 __ movq(xmm1, Address(from, 8));
852 852 __ movq(Address(from, to_from, Address::times_1, 8), xmm1);
853 853 __ movq(xmm2, Address(from, 16));
854 854 __ movq(Address(from, to_from, Address::times_1, 16), xmm2);
855 855 __ movq(xmm3, Address(from, 24));
856 856 __ movq(Address(from, to_from, Address::times_1, 24), xmm3);
857 857 __ movq(xmm4, Address(from, 32));
858 858 __ movq(Address(from, to_from, Address::times_1, 32), xmm4);
859 859 __ movq(xmm5, Address(from, 40));
860 860 __ movq(Address(from, to_from, Address::times_1, 40), xmm5);
861 861 __ movq(xmm6, Address(from, 48));
862 862 __ movq(Address(from, to_from, Address::times_1, 48), xmm6);
863 863 __ movq(xmm7, Address(from, 56));
864 864 __ movq(Address(from, to_from, Address::times_1, 56), xmm7);
865 865 }
866 866
867 867 __ addl(from, 64);
868 868 __ BIND(L_copy_64_bytes);
869 869 __ subl(qword_count, 8);
870 870 __ jcc(Assembler::greaterEqual, L_copy_64_bytes_loop);
871 871 __ addl(qword_count, 8);
872 872 __ jccb(Assembler::zero, L_exit);
873 873 //
874 874 // length is too short, just copy qwords
875 875 //
876 876 __ BIND(L_copy_8_bytes);
877 877 __ movq(xmm0, Address(from, 0));
878 878 __ movq(Address(from, to_from, Address::times_1), xmm0);
879 879 __ addl(from, 8);
880 880 __ decrement(qword_count);
881 881 __ jcc(Assembler::greater, L_copy_8_bytes);
882 882 __ BIND(L_exit);
883 883 }
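Note the addressing trick shared by these copy loops: to_from holds (to - from), so a single advancing source pointer reaches both arrays and one register increment moves both streams. In C:

    // sketch of the to_from trick used by xmm_copy_forward / mmx_copy_forward
    void copy_forward_sketch(char* from, char* to, size_t n) {
      ptrdiff_t to_from = to - from;    // fixed displacement between the two streams
      for (size_t i = 0; i < n; i++) {
        from[to_from] = *from;          // store at from + (to - from) == to + i
        from++;
      }
    }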
884 884
 885  885   // Copy 64-byte chunks
886 886 //
887 887 // Inputs:
888 888 // from - source array address
889 889 // to_from - destination array address - from
 890  890   //   qword_count - 8-byte element count
891 891 //
892 892 void mmx_copy_forward(Register from, Register to_from, Register qword_count) {
893 893 assert( VM_Version::supports_mmx(), "supported cpu only" );
894 894 Label L_copy_64_bytes_loop, L_copy_64_bytes, L_copy_8_bytes, L_exit;
895 895 // Copy 64-byte chunks
896 896 __ jmpb(L_copy_64_bytes);
897 897 __ align(OptoLoopAlignment);
898 898 __ BIND(L_copy_64_bytes_loop);
899 899 __ movq(mmx0, Address(from, 0));
900 900 __ movq(mmx1, Address(from, 8));
901 901 __ movq(mmx2, Address(from, 16));
902 902 __ movq(Address(from, to_from, Address::times_1, 0), mmx0);
903 903 __ movq(mmx3, Address(from, 24));
904 904 __ movq(Address(from, to_from, Address::times_1, 8), mmx1);
905 905 __ movq(mmx4, Address(from, 32));
906 906 __ movq(Address(from, to_from, Address::times_1, 16), mmx2);
907 907 __ movq(mmx5, Address(from, 40));
908 908 __ movq(Address(from, to_from, Address::times_1, 24), mmx3);
909 909 __ movq(mmx6, Address(from, 48));
910 910 __ movq(Address(from, to_from, Address::times_1, 32), mmx4);
911 911 __ movq(mmx7, Address(from, 56));
912 912 __ movq(Address(from, to_from, Address::times_1, 40), mmx5);
913 913 __ movq(Address(from, to_from, Address::times_1, 48), mmx6);
914 914 __ movq(Address(from, to_from, Address::times_1, 56), mmx7);
915 915 __ addptr(from, 64);
916 916 __ BIND(L_copy_64_bytes);
917 917 __ subl(qword_count, 8);
918 918 __ jcc(Assembler::greaterEqual, L_copy_64_bytes_loop);
919 919 __ addl(qword_count, 8);
920 920 __ jccb(Assembler::zero, L_exit);
921 921 //
922 922 // length is too short, just copy qwords
923 923 //
924 924 __ BIND(L_copy_8_bytes);
925 925 __ movq(mmx0, Address(from, 0));
926 926 __ movq(Address(from, to_from, Address::times_1), mmx0);
927 927 __ addptr(from, 8);
928 928 __ decrement(qword_count);
929 929 __ jcc(Assembler::greater, L_copy_8_bytes);
930 930 __ BIND(L_exit);
931 931 __ emms();
932 932 }
933 933
934 934 address generate_disjoint_copy(BasicType t, bool aligned,
935 935 Address::ScaleFactor sf,
936 936 address* entry, const char *name) {
937 937 __ align(CodeEntryAlignment);
938 938 StubCodeMark mark(this, "StubRoutines", name);
939 939 address start = __ pc();
940 940
941 941 Label L_0_count, L_exit, L_skip_align1, L_skip_align2, L_copy_byte;
942 942 Label L_copy_2_bytes, L_copy_4_bytes, L_copy_64_bytes;
943 943
944 944 int shift = Address::times_ptr - sf;
945 945
946 946 const Register from = rsi; // source array address
947 947 const Register to = rdi; // destination array address
948 948 const Register count = rcx; // elements count
949 949 const Register to_from = to; // (to - from)
950 950 const Register saved_to = rdx; // saved destination array address
951 951
952 952 __ enter(); // required for proper stackwalking of RuntimeStub frame
953 953 __ push(rsi);
954 954 __ push(rdi);
955 955 __ movptr(from , Address(rsp, 12+ 4));
956 956 __ movptr(to , Address(rsp, 12+ 8));
957 957 __ movl(count, Address(rsp, 12+ 12));
958 +
959 + if (entry != NULL) {
960 + *entry = __ pc(); // Entry point from conjoint arraycopy stub.
961 + BLOCK_COMMENT("Entry:");
962 + }
963 +
958 964 if (t == T_OBJECT) {
959 965 __ testl(count, count);
960 966 __ jcc(Assembler::zero, L_0_count);
961 967 gen_write_ref_array_pre_barrier(to, count);
962 968 __ mov(saved_to, to); // save 'to'
963 969 }
964 970
965 - *entry = __ pc(); // Entry point from conjoint arraycopy stub.
966 - BLOCK_COMMENT("Entry:");
967 -
968 971 __ subptr(to, from); // to --> to_from
969 972 __ cmpl(count, 2<<shift); // Short arrays (< 8 bytes) copy by element
970 973 __ jcc(Assembler::below, L_copy_4_bytes); // use unsigned cmp
971 974 if (!UseUnalignedLoadStores && !aligned && (t == T_BYTE || t == T_SHORT)) {
972 975 // align source address at 4 bytes address boundary
973 976 if (t == T_BYTE) {
974 977 // One byte misalignment happens only for byte arrays
975 978 __ testl(from, 1);
976 979 __ jccb(Assembler::zero, L_skip_align1);
977 980 __ movb(rax, Address(from, 0));
978 981 __ movb(Address(from, to_from, Address::times_1, 0), rax);
979 982 __ increment(from);
980 983 __ decrement(count);
981 984 __ BIND(L_skip_align1);
982 985 }
983 986 // Two bytes misalignment happens only for byte and short (char) arrays
984 987 __ testl(from, 2);
985 988 __ jccb(Assembler::zero, L_skip_align2);
986 989 __ movw(rax, Address(from, 0));
987 990 __ movw(Address(from, to_from, Address::times_1, 0), rax);
988 991 __ addptr(from, 2);
989 992 __ subl(count, 1<<(shift-1));
990 993 __ BIND(L_skip_align2);
991 994 }
992 995 if (!VM_Version::supports_mmx()) {
993 996 __ mov(rax, count); // save 'count'
994 997 __ shrl(count, shift); // bytes count
995 998 __ addptr(to_from, from);// restore 'to'
996 999 __ rep_mov();
997 1000 __ subptr(to_from, from);// restore 'to_from'
998 1001 __ mov(count, rax); // restore 'count'
999 1002 __ jmpb(L_copy_2_bytes); // all dwords were copied
1000 1003 } else {
1001 1004 if (!UseUnalignedLoadStores) {
1002 1005         // align to 8 bytes; we know we are 4-byte aligned to start
1003 1006 __ testptr(from, 4);
1004 1007 __ jccb(Assembler::zero, L_copy_64_bytes);
1005 1008 __ movl(rax, Address(from, 0));
1006 1009 __ movl(Address(from, to_from, Address::times_1, 0), rax);
1007 1010 __ addptr(from, 4);
1008 1011 __ subl(count, 1<<shift);
1009 1012 }
1010 1013 __ BIND(L_copy_64_bytes);
1011 1014 __ mov(rax, count);
1012 1015       __ shrl(rax, shift+1); // 8-byte chunk count
1013 1016 //
1014 1017 // Copy 8-byte chunks through MMX registers, 8 per iteration of the loop
1015 1018 //
1016 1019 if (UseXMMForArrayCopy) {
1017 1020 xmm_copy_forward(from, to_from, rax);
1018 1021 } else {
1019 1022 mmx_copy_forward(from, to_from, rax);
1020 1023 }
1021 1024 }
1022 1025     // copy trailing dword
1023 1026 __ BIND(L_copy_4_bytes);
1024 1027 __ testl(count, 1<<shift);
1025 1028 __ jccb(Assembler::zero, L_copy_2_bytes);
1026 1029 __ movl(rax, Address(from, 0));
1027 1030 __ movl(Address(from, to_from, Address::times_1, 0), rax);
1028 1031 if (t == T_BYTE || t == T_SHORT) {
1029 1032 __ addptr(from, 4);
1030 1033 __ BIND(L_copy_2_bytes);
1031 1034       // copy trailing word
1032 1035 __ testl(count, 1<<(shift-1));
1033 1036 __ jccb(Assembler::zero, L_copy_byte);
1034 1037 __ movw(rax, Address(from, 0));
1035 1038 __ movw(Address(from, to_from, Address::times_1, 0), rax);
1036 1039 if (t == T_BYTE) {
1037 1040 __ addptr(from, 2);
1038 1041 __ BIND(L_copy_byte);
1039 1042         // copy trailing byte
1040 1043 __ testl(count, 1);
1041 1044 __ jccb(Assembler::zero, L_exit);
1042 1045 __ movb(rax, Address(from, 0));
1043 1046 __ movb(Address(from, to_from, Address::times_1, 0), rax);
1044 1047 __ BIND(L_exit);
1045 1048 } else {
1046 1049 __ BIND(L_copy_byte);
1047 1050 }
1048 1051 } else {
1049 1052 __ BIND(L_copy_2_bytes);
1050 1053 }
1051 1054
1052 1055 if (t == T_OBJECT) {
1053 1056 __ movl(count, Address(rsp, 12+12)); // reread 'count'
1054 1057 __ mov(to, saved_to); // restore 'to'
1055 1058 gen_write_ref_array_post_barrier(to, count);
1056 1059 __ BIND(L_0_count);
1057 1060 }
1058 1061 inc_copy_counter_np(t);
1059 1062 __ pop(rdi);
1060 1063 __ pop(rsi);
1061 1064 __ leave(); // required for proper stackwalking of RuntimeStub frame
1062 1065 __ xorptr(rax, rax); // return 0
1063 1066 __ ret(0);
1064 1067 return start;
1065 1068 }
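The point of the change above: *entry, the target of the conjoint stub's no-overlap dispatch, is now set before the T_OBJECT pre-barrier instead of after it, so a caller arriving through the entry still executes the barrier. A pseudo-C sketch of the resulting flow for T_OBJECT:

    // conjoint stub:
    //   if (!overlap(src, dst, n)) goto disjoint_entry;  // barrier runs there, exactly once
    //   if (n != 0) { pre_barrier(dst, n); copy_backward(src, dst, n); post_barrier(dst, n); }
    // disjoint stub (disjoint_entry == *entry, set above):
    //   if (n != 0) { pre_barrier(dst, n); copy_forward(src, dst, n); post_barrier(dst, n); }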
1066 1069
1067 1070
1068 1071 address generate_fill(BasicType t, bool aligned, const char *name) {
1069 1072 __ align(CodeEntryAlignment);
1070 1073 StubCodeMark mark(this, "StubRoutines", name);
1071 1074 address start = __ pc();
1072 1075
1073 1076 BLOCK_COMMENT("Entry:");
1074 1077
1075 1078     const Register to       = rdi;  // destination array address
1076 1079 const Register value = rdx; // value
1077 1080 const Register count = rsi; // elements count
1078 1081
1079 1082 __ enter(); // required for proper stackwalking of RuntimeStub frame
1080 1083 __ push(rsi);
1081 1084 __ push(rdi);
1082 1085 __ movptr(to , Address(rsp, 12+ 4));
1083 1086 __ movl(value, Address(rsp, 12+ 8));
1084 1087 __ movl(count, Address(rsp, 12+ 12));
1085 1088
1086 1089 __ generate_fill(t, aligned, to, value, count, rax, xmm0);
1087 1090
1088 1091 __ pop(rdi);
1089 1092 __ pop(rsi);
1090 1093 __ leave(); // required for proper stackwalking of RuntimeStub frame
1091 1094 __ ret(0);
1092 1095 return start;
1093 1096 }
1094 1097
1095 1098 address generate_conjoint_copy(BasicType t, bool aligned,
1096 1099 Address::ScaleFactor sf,
1097 1100 address nooverlap_target,
1098 1101 address* entry, const char *name) {
1099 1102 __ align(CodeEntryAlignment);
1100 1103 StubCodeMark mark(this, "StubRoutines", name);
1101 1104 address start = __ pc();
1102 1105
1103 1106 Label L_0_count, L_exit, L_skip_align1, L_skip_align2, L_copy_byte;
1104 1107 Label L_copy_2_bytes, L_copy_4_bytes, L_copy_8_bytes, L_copy_8_bytes_loop;
1105 1108
1106 1109 int shift = Address::times_ptr - sf;
1107 1110
1108 1111 const Register src = rax; // source array address
1109 1112 const Register dst = rdx; // destination array address
1110 1113 const Register from = rsi; // source array address
1111 1114 const Register to = rdi; // destination array address
1112 1115 const Register count = rcx; // elements count
1113 1116 const Register end = rax; // array end address
1114 1117
1115 1118 __ enter(); // required for proper stackwalking of RuntimeStub frame
1116 1119 __ push(rsi);
1117 1120 __ push(rdi);
1118 1121 __ movptr(src , Address(rsp, 12+ 4)); // from
1119 1122 __ movptr(dst , Address(rsp, 12+ 8)); // to
1120 1123 __ movl2ptr(count, Address(rsp, 12+12)); // count
1121 - if (t == T_OBJECT) {
1122 - gen_write_ref_array_pre_barrier(dst, count);
1123 - }
1124 1124
1125 1125 if (entry != NULL) {
1126 1126 *entry = __ pc(); // Entry point from generic arraycopy stub.
1127 1127 BLOCK_COMMENT("Entry:");
1128 1128 }
1129 1129
1130 - if (t == T_OBJECT) {
1131 - __ testl(count, count);
1132 - __ jcc(Assembler::zero, L_0_count);
1133 - }
1130 + // nooverlap_target expects arguments in rsi and rdi.
1134 1131 __ mov(from, src);
1135 1132 __ mov(to , dst);
1136 1133
1137 - // arrays overlap test
1134 + // arrays overlap test: dispatch to disjoint stub if necessary.
1138 1135 RuntimeAddress nooverlap(nooverlap_target);
1139 1136 __ cmpptr(dst, src);
1140 1137 __ lea(end, Address(src, count, sf, 0)); // src + count * elem_size
1141 1138 __ jump_cc(Assembler::belowEqual, nooverlap);
1142 1139 __ cmpptr(dst, end);
1143 1140 __ jump_cc(Assembler::aboveEqual, nooverlap);
1144 1141
1142 + if (t == T_OBJECT) {
1143 + __ testl(count, count);
1144 + __ jcc(Assembler::zero, L_0_count);
1145 + gen_write_ref_array_pre_barrier(dst, count);
1146 + }
1147 +
1145 1148 // copy from high to low
1146 1149 __ cmpl(count, 2<<shift); // Short arrays (< 8 bytes) copy by element
1147 1150 __ jcc(Assembler::below, L_copy_4_bytes); // use unsigned cmp
1148 1151 if (t == T_BYTE || t == T_SHORT) {
1149 1152 // Align the end of destination array at 4 bytes address boundary
1150 1153 __ lea(end, Address(dst, count, sf, 0));
1151 1154 if (t == T_BYTE) {
1152 1155 // One byte misalignment happens only for byte arrays
1153 1156 __ testl(end, 1);
1154 1157 __ jccb(Assembler::zero, L_skip_align1);
1155 1158 __ decrement(count);
1156 1159 __ movb(rdx, Address(from, count, sf, 0));
1157 1160 __ movb(Address(to, count, sf, 0), rdx);
1158 1161 __ BIND(L_skip_align1);
1159 1162 }
1160 1163 // Two bytes misalignment happens only for byte and short (char) arrays
1161 1164 __ testl(end, 2);
1162 1165 __ jccb(Assembler::zero, L_skip_align2);
1163 1166 __ subptr(count, 1<<(shift-1));
1164 1167 __ movw(rdx, Address(from, count, sf, 0));
1165 1168 __ movw(Address(to, count, sf, 0), rdx);
1166 1169 __ BIND(L_skip_align2);
1167 1170 __ cmpl(count, 2<<shift); // Short arrays (< 8 bytes) copy by element
1168 1171 __ jcc(Assembler::below, L_copy_4_bytes);
1169 1172 }
1170 1173
1171 1174 if (!VM_Version::supports_mmx()) {
1172 1175 __ std();
1173 1176 __ mov(rax, count); // Save 'count'
1174 1177 __ mov(rdx, to); // Save 'to'
1175 1178 __ lea(rsi, Address(from, count, sf, -4));
1176 1179 __ lea(rdi, Address(to , count, sf, -4));
1177 1180 __ shrptr(count, shift); // bytes count
1178 1181 __ rep_mov();
1179 1182 __ cld();
1180 1183 __ mov(count, rax); // restore 'count'
1181 1184       __ andl(count, (1<<shift)-1);      // mask the number of remaining elements
1182 1185 __ movptr(from, Address(rsp, 12+4)); // reread 'from'
1183 1186 __ mov(to, rdx); // restore 'to'
1184 1187       __ jmpb(L_copy_2_bytes); // all dwords were copied
1185 1188 } else {
1186 1189       // Align the end of the array to 8 bytes; it is already 4-byte aligned.
1187 1190 __ testptr(end, 4);
1188 1191 __ jccb(Assembler::zero, L_copy_8_bytes);
1189 1192 __ subl(count, 1<<shift);
1190 1193 __ movl(rdx, Address(from, count, sf, 0));
1191 1194 __ movl(Address(to, count, sf, 0), rdx);
1192 1195 __ jmpb(L_copy_8_bytes);
1193 1196
1194 1197 __ align(OptoLoopAlignment);
1195 1198 // Move 8 bytes
1196 1199 __ BIND(L_copy_8_bytes_loop);
1197 1200 if (UseXMMForArrayCopy) {
1198 1201 __ movq(xmm0, Address(from, count, sf, 0));
1199 1202 __ movq(Address(to, count, sf, 0), xmm0);
1200 1203 } else {
1201 1204 __ movq(mmx0, Address(from, count, sf, 0));
1202 1205 __ movq(Address(to, count, sf, 0), mmx0);
1203 1206 }
1204 1207 __ BIND(L_copy_8_bytes);
1205 1208 __ subl(count, 2<<shift);
1206 1209 __ jcc(Assembler::greaterEqual, L_copy_8_bytes_loop);
1207 1210 __ addl(count, 2<<shift);
1208 1211 if (!UseXMMForArrayCopy) {
1209 1212 __ emms();
1210 1213 }
1211 1214 }
1212 1215 __ BIND(L_copy_4_bytes);
1213 1216     // copy prefix dword
1214 1217 __ testl(count, 1<<shift);
1215 1218 __ jccb(Assembler::zero, L_copy_2_bytes);
1216 1219 __ movl(rdx, Address(from, count, sf, -4));
1217 1220 __ movl(Address(to, count, sf, -4), rdx);
1218 1221
1219 1222 if (t == T_BYTE || t == T_SHORT) {
1220 1223 __ subl(count, (1<<shift));
1221 1224 __ BIND(L_copy_2_bytes);
1222 1225       // copy prefix word
1223 1226 __ testl(count, 1<<(shift-1));
1224 1227 __ jccb(Assembler::zero, L_copy_byte);
1225 1228 __ movw(rdx, Address(from, count, sf, -2));
1226 1229 __ movw(Address(to, count, sf, -2), rdx);
1227 1230 if (t == T_BYTE) {
1228 1231 __ subl(count, 1<<(shift-1));
1229 1232 __ BIND(L_copy_byte);
1230 1233 // copy prefix byte
1231 1234 __ testl(count, 1);
1232 1235 __ jccb(Assembler::zero, L_exit);
1233 1236 __ movb(rdx, Address(from, 0));
1234 1237 __ movb(Address(to, 0), rdx);
1235 1238 __ BIND(L_exit);
1236 1239 } else {
1237 1240 __ BIND(L_copy_byte);
1238 1241 }
1239 1242 } else {
1240 1243 __ BIND(L_copy_2_bytes);
1241 1244 }
1242 1245 if (t == T_OBJECT) {
1243 1246 __ movl2ptr(count, Address(rsp, 12+12)); // reread count
1244 1247 gen_write_ref_array_post_barrier(to, count);
1245 1248 __ BIND(L_0_count);
1246 1249 }
1247 1250 inc_copy_counter_np(t);
1248 1251 __ pop(rdi);
1249 1252 __ pop(rsi);
1250 1253 __ leave(); // required for proper stackwalking of RuntimeStub frame
1251 1254 __ xorptr(rax, rax); // return 0
1252 1255 __ ret(0);
1253 1256 return start;
1254 1257 }
1255 1258
1256 1259
1257 1260 address generate_disjoint_long_copy(address* entry, const char *name) {
1258 1261 __ align(CodeEntryAlignment);
1259 1262 StubCodeMark mark(this, "StubRoutines", name);
1260 1263 address start = __ pc();
1261 1264
1262 1265 Label L_copy_8_bytes, L_copy_8_bytes_loop;
1263 1266 const Register from = rax; // source array address
1264 1267 const Register to = rdx; // destination array address
1265 1268 const Register count = rcx; // elements count
1266 1269 const Register to_from = rdx; // (to - from)
1267 1270
1268 1271 __ enter(); // required for proper stackwalking of RuntimeStub frame
1269 1272 __ movptr(from , Address(rsp, 8+0)); // from
1270 1273 __ movptr(to , Address(rsp, 8+4)); // to
1271 1274 __ movl2ptr(count, Address(rsp, 8+8)); // count
1272 1275
1273 1276 *entry = __ pc(); // Entry point from conjoint arraycopy stub.
1274 1277 BLOCK_COMMENT("Entry:");
1275 1278
1276 1279 __ subptr(to, from); // to --> to_from
1277 1280 if (VM_Version::supports_mmx()) {
1278 1281 if (UseXMMForArrayCopy) {
1279 1282 xmm_copy_forward(from, to_from, count);
1280 1283 } else {
1281 1284 mmx_copy_forward(from, to_from, count);
1282 1285 }
1283 1286 } else {
1284 1287 __ jmpb(L_copy_8_bytes);
1285 1288 __ align(OptoLoopAlignment);
1286 1289 __ BIND(L_copy_8_bytes_loop);
1287 1290 __ fild_d(Address(from, 0));
1288 1291 __ fistp_d(Address(from, to_from, Address::times_1));
1289 1292 __ addptr(from, 8);
1290 1293 __ BIND(L_copy_8_bytes);
1291 1294 __ decrement(count);
1292 1295 __ jcc(Assembler::greaterEqual, L_copy_8_bytes_loop);
1293 1296 }
1294 1297 inc_copy_counter_np(T_LONG);
1295 1298 __ leave(); // required for proper stackwalking of RuntimeStub frame
1296 1299 __ xorptr(rax, rax); // return 0
1297 1300 __ ret(0);
1298 1301 return start;
1299 1302 }
1300 1303
1301 1304 address generate_conjoint_long_copy(address nooverlap_target,
1302 1305 address* entry, const char *name) {
1303 1306 __ align(CodeEntryAlignment);
1304 1307 StubCodeMark mark(this, "StubRoutines", name);
1305 1308 address start = __ pc();
1306 1309
1307 1310 Label L_copy_8_bytes, L_copy_8_bytes_loop;
1308 1311 const Register from = rax; // source array address
1309 1312 const Register to = rdx; // destination array address
1310 1313 const Register count = rcx; // elements count
1311 1314 const Register end_from = rax; // source array end address
1312 1315
1313 1316 __ enter(); // required for proper stackwalking of RuntimeStub frame
1314 1317 __ movptr(from , Address(rsp, 8+0)); // from
1315 1318 __ movptr(to , Address(rsp, 8+4)); // to
1316 1319 __ movl2ptr(count, Address(rsp, 8+8)); // count
1317 1320
1318 1321 *entry = __ pc(); // Entry point from generic arraycopy stub.
1319 1322 BLOCK_COMMENT("Entry:");
1320 1323
1321 1324 // arrays overlap test
1322 1325 __ cmpptr(to, from);
1323 1326 RuntimeAddress nooverlap(nooverlap_target);
1324 1327 __ jump_cc(Assembler::belowEqual, nooverlap);
1325 1328 __ lea(end_from, Address(from, count, Address::times_8, 0));
1326 1329 __ cmpptr(to, end_from);
1327 1330 __ movptr(from, Address(rsp, 8)); // from
1328 1331 __ jump_cc(Assembler::aboveEqual, nooverlap);
1329 1332
1330 1333 __ jmpb(L_copy_8_bytes);
1331 1334
1332 1335 __ align(OptoLoopAlignment);
1333 1336 __ BIND(L_copy_8_bytes_loop);
1334 1337 if (VM_Version::supports_mmx()) {
1335 1338 if (UseXMMForArrayCopy) {
1336 1339 __ movq(xmm0, Address(from, count, Address::times_8));
1337 1340 __ movq(Address(to, count, Address::times_8), xmm0);
1338 1341 } else {
1339 1342 __ movq(mmx0, Address(from, count, Address::times_8));
1340 1343 __ movq(Address(to, count, Address::times_8), mmx0);
1341 1344 }
1342 1345 } else {
1343 1346 __ fild_d(Address(from, count, Address::times_8));
1344 1347 __ fistp_d(Address(to, count, Address::times_8));
1345 1348 }
1346 1349 __ BIND(L_copy_8_bytes);
1347 1350 __ decrement(count);
1348 1351 __ jcc(Assembler::greaterEqual, L_copy_8_bytes_loop);
1349 1352
1350 1353 if (VM_Version::supports_mmx() && !UseXMMForArrayCopy) {
1351 1354 __ emms();
1352 1355 }
1353 1356 inc_copy_counter_np(T_LONG);
1354 1357 __ leave(); // required for proper stackwalking of RuntimeStub frame
1355 1358 __ xorptr(rax, rax); // return 0
1356 1359 __ ret(0);
1357 1360 return start;
1358 1361 }
1359 1362
1360 1363
1361 1364 // Helper for generating a dynamic type check.
1362 1365 // The sub_klass must be one of {rbx, rdx, rsi}.
1363 1366 // The temp is killed.
1364 1367 void generate_type_check(Register sub_klass,
1365 1368 Address& super_check_offset_addr,
1366 1369 Address& super_klass_addr,
1367 1370 Register temp,
1368 1371 Label* L_success, Label* L_failure) {
1369 1372 BLOCK_COMMENT("type_check:");
1370 1373
1371 1374 Label L_fallthrough;
1372 1375 #define LOCAL_JCC(assembler_con, label_ptr) \
1373 1376 if (label_ptr != NULL) __ jcc(assembler_con, *(label_ptr)); \
1374 1377 else __ jcc(assembler_con, L_fallthrough) /*omit semi*/
1375 1378
1376 1379 // The following is a strange variation of the fast path which requires
1377 1380 // one less register, because needed values are on the argument stack.
1378 1381 // __ check_klass_subtype_fast_path(sub_klass, *super_klass*, temp,
1379 1382 // L_success, L_failure, NULL);
1380 1383 assert_different_registers(sub_klass, temp);
1381 1384
1382 1385 int sc_offset = (klassOopDesc::header_size() * HeapWordSize +
1383 1386 Klass::secondary_super_cache_offset_in_bytes());
1384 1387
1385 1388 // if the pointers are equal, we are done (e.g., String[] elements)
1386 1389 __ cmpptr(sub_klass, super_klass_addr);
1387 1390 LOCAL_JCC(Assembler::equal, L_success);
1388 1391
1389 1392 // check the supertype display:
1390 1393 __ movl2ptr(temp, super_check_offset_addr);
1391 1394 Address super_check_addr(sub_klass, temp, Address::times_1, 0);
1392 1395 __ movptr(temp, super_check_addr); // load displayed supertype
1393 1396 __ cmpptr(temp, super_klass_addr); // test the super type
1394 1397 LOCAL_JCC(Assembler::equal, L_success);
1395 1398
1396 1399 // if it was a primary super, we can just fail immediately
1397 1400 __ cmpl(super_check_offset_addr, sc_offset);
1398 1401 LOCAL_JCC(Assembler::notEqual, L_failure);
1399 1402
1400 1403 // The repne_scan instruction uses fixed registers, which will get spilled.
1401 1404 // We happen to know this works best when super_klass is in rax.
1402 1405 Register super_klass = temp;
1403 1406 __ movptr(super_klass, super_klass_addr);
1404 1407 __ check_klass_subtype_slow_path(sub_klass, super_klass, noreg, noreg,
1405 1408 L_success, L_failure);
1406 1409
1407 1410 __ bind(L_fallthrough);
1408 1411
1409 1412 if (L_success == NULL) { BLOCK_COMMENT("L_success:"); }
1410 1413 if (L_failure == NULL) { BLOCK_COMMENT("L_failure:"); }
1411 1414
1412 1415 #undef LOCAL_JCC
1413 1416 }
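This is HotSpot's standard two-level subtype test; as a C sketch (sc_offset as computed above; scan_secondary_supers() is a hypothetical stand-in for check_klass_subtype_slow_path's repne scan):

    bool subtype_check_sketch(Klass* sub, Klass* super, int super_check_offset, int sc_offset) {
      if (sub == super) return true;                        // same klass (e.g. String[] elements)
      Klass* displayed = *(Klass**)((char*)sub + super_check_offset);
      if (displayed == super) return true;                  // supertype display hit
      if (super_check_offset != sc_offset) return false;    // a primary-super miss is final
      return scan_secondary_supers(sub, super);             // slow path; a hit is cached
    }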
1414 1417
1415 1418 //
1416 1419 // Generate checkcasting array copy stub
1417 1420 //
1418 1421 // Input:
1419 1422 // 4(rsp) - source array address
1420 1423 // 8(rsp) - destination array address
1421 1424 // 12(rsp) - element count, can be zero
1422 1425 // 16(rsp) - size_t ckoff (super_check_offset)
1423 1426 // 20(rsp) - oop ckval (super_klass)
1424 1427 //
1425 1428 // Output:
1426 1429 // rax, == 0 - success
1427 1430 // rax, == -1^K - failure, where K is partial transfer count
1428 1431 //
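The failure value -1^K is just ~K, so a caller recovers the partial transfer count as ~rax. The stub's semantics in a C sketch (is_subtype()/klass_of() are stand-ins for the type check above; barriers elided):

    int checkcast_copy_sketch(oop* from, oop* to, int length, Klass* super) {
      for (int i = 0; i < length; i++) {
        oop e = from[i];
        if (e != NULL && !is_subtype(klass_of(e), super)) {
          return ~i;          // == -1 ^ i; i elements were already copied
        }
        to[i] = e;            // NULLs are stored without a check
      }
      return 0;               // success
    }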
1429 1432 address generate_checkcast_copy(const char *name, address* entry) {
1430 1433 __ align(CodeEntryAlignment);
1431 1434 StubCodeMark mark(this, "StubRoutines", name);
1432 1435 address start = __ pc();
1433 1436
1434 1437 Label L_load_element, L_store_element, L_do_card_marks, L_done;
1435 1438
1436 1439 // register use:
1437 1440 // rax, rdx, rcx -- loop control (end_from, end_to, count)
1438 1441 // rdi, rsi -- element access (oop, klass)
1439 1442 // rbx, -- temp
1440 1443 const Register from = rax; // source array address
1441 1444 const Register to = rdx; // destination array address
1442 1445 const Register length = rcx; // elements count
1443 1446 const Register elem = rdi; // each oop copied
1444 1447 const Register elem_klass = rsi; // each elem._klass (sub_klass)
1445 1448 const Register temp = rbx; // lone remaining temp
1446 1449
1447 1450 __ enter(); // required for proper stackwalking of RuntimeStub frame
1448 1451
1449 1452 __ push(rsi);
1450 1453 __ push(rdi);
1451 1454 __ push(rbx);
1452 1455
1453 1456 Address from_arg(rsp, 16+ 4); // from
1454 1457 Address to_arg(rsp, 16+ 8); // to
1455 1458 Address length_arg(rsp, 16+12); // elements count
1456 1459 Address ckoff_arg(rsp, 16+16); // super_check_offset
1457 1460 Address ckval_arg(rsp, 16+20); // super_klass
1458 1461
1459 1462 // Load up:
1460 1463 __ movptr(from, from_arg);
1461 1464 __ movptr(to, to_arg);
1462 1465 __ movl2ptr(length, length_arg);
1463 1466
1464 - *entry = __ pc(); // Entry point from generic arraycopy stub.
1465 - BLOCK_COMMENT("Entry:");
1467 + if (entry != NULL) {
1468 + *entry = __ pc(); // Entry point from generic arraycopy stub.
1469 + BLOCK_COMMENT("Entry:");
1470 + }
1466 1471
1467 1472 //---------------------------------------------------------------
1468 1473 // Assembler stub will be used for this call to arraycopy
1469 1474 // if the two arrays are subtypes of Object[] but the
1470 1475 // destination array type is not equal to or a supertype
1471 1476 // of the source type. Each element must be separately
1472 1477 // checked.
1473 1478
1474 1479 // Loop-invariant addresses. They are exclusive end pointers.
1475 1480 Address end_from_addr(from, length, Address::times_ptr, 0);
1476 1481 Address end_to_addr(to, length, Address::times_ptr, 0);
1477 1482
1478 1483 Register end_from = from; // re-use
1479 1484 Register end_to = to; // re-use
1480 1485 Register count = length; // re-use
1481 1486
1482 1487 // Loop-variant addresses. They assume post-incremented count < 0.
1483 1488 Address from_element_addr(end_from, count, Address::times_ptr, 0);
1484 1489 Address to_element_addr(end_to, count, Address::times_ptr, 0);
1485 1490 Address elem_klass_addr(elem, oopDesc::klass_offset_in_bytes());
1486 1491
1487 1492 // Copy from low to high addresses, indexed from the end of each array.
1488 1493 gen_write_ref_array_pre_barrier(to, count);
1489 1494 __ lea(end_from, end_from_addr);
1490 1495 __ lea(end_to, end_to_addr);
1491 1496 assert(length == count, ""); // else fix next line:
1492 1497 __ negptr(count); // negate and test the length
1493 1498 __ jccb(Assembler::notZero, L_load_element);
1494 1499
1495 1500 // Empty array: Nothing to do.
1496 1501 __ xorptr(rax, rax); // return 0 on (trivial) success
1497 1502 __ jmp(L_done);
1498 1503
1499 1504 // ======== begin loop ========
1500 1505 // (Loop is rotated; its entry is L_load_element.)
1501 1506 // Loop control:
1502 1507 // for (count = -count; count != 0; count++)
1503 1508 // Base pointers src, dst are biased by wordSize*count, to the last element.
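// In C, the rotated loop is (sketch; subtype_ok stands in for the
// generate_type_check call below):
//
//   for (count = -count; count != 0; count++) {
//     oop elem = end_from[count];                 // L_load_element
//     if (elem != NULL && !subtype_ok(elem))      // NULL always stores OK
//       break;                                    // K = length + count copied
//     end_to[count] = elem;                       // L_store_element
//   }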
1504 1509 __ align(OptoLoopAlignment);
1505 1510
1506 1511 __ BIND(L_store_element);
1507 1512 __ movptr(to_element_addr, elem); // store the oop
1508 1513 __ increment(count); // increment the count toward zero
1509 1514 __ jccb(Assembler::zero, L_do_card_marks);
1510 1515
1511 1516 // ======== loop entry is here ========
1512 1517 __ BIND(L_load_element);
1513 1518 __ movptr(elem, from_element_addr); // load the oop
1514 1519 __ testptr(elem, elem);
1515 1520 __ jccb(Assembler::zero, L_store_element);
1516 1521
1517 1522 // (Could do a trick here: Remember last successful non-null
1518 1523 // element stored and make a quick oop equality check on it.)
1519 1524
1520 1525 __ movptr(elem_klass, elem_klass_addr); // query the object klass
1521 1526 generate_type_check(elem_klass, ckoff_arg, ckval_arg, temp,
1522 1527 &L_store_element, NULL);
1523 1528 // (On fall-through, we have failed the element type check.)
1524 1529 // ======== end loop ========
1525 1530
1526 1531 // It was a real error; we must depend on the caller to finish the job.
1527 1532 // Register "count" = -1 * number of *remaining* oops, length_arg = *total* oops.
1528 1533 // Emit GC store barriers for the oops we have copied (length_arg + count),
1529 1534 // and report their number to the caller.
1530 1535 __ addl(count, length_arg); // transfers = (length - remaining)
1531 1536 __ movl2ptr(rax, count); // save the value
1532 1537 __ notptr(rax); // report (-1^K) to caller
1533 1538 __ movptr(to, to_arg); // reload
1534 1539 assert_different_registers(to, count, rax);
1535 1540 gen_write_ref_array_post_barrier(to, count);
1536 1541 __ jmpb(L_done);
1537 1542
1538 1543 // Come here on success only.
1539 1544 __ BIND(L_do_card_marks);
1540 1545 __ movl2ptr(count, length_arg);
1541 1546 __ movptr(to, to_arg); // reload
1542 1547 gen_write_ref_array_post_barrier(to, count);
1543 1548 __ xorptr(rax, rax); // return 0 on success
1544 1549
1545 1550 // Common exit point (success or failure).
1546 1551 __ BIND(L_done);
1547 1552 __ pop(rbx);
1548 1553 __ pop(rdi);
1549 1554 __ pop(rsi);
1550 1555 inc_counter_np(SharedRuntime::_checkcast_array_copy_ctr);
1551 1556 __ leave(); // required for proper stackwalking of RuntimeStub frame
1552 1557 __ ret(0);
1553 1558
1554 1559 return start;
1555 1560 }
1556 1561
1557 1562 //
1558 1563 // Generate 'unsafe' array copy stub
1559 1564 // Though just as safe as the other stubs, it takes an unscaled
1560 1565 // size_t argument instead of an element count.
1561 1566 //
1562 1567 // Input:
1563 1568 // 4(rsp) - source array address
1564 1569 // 8(rsp) - destination array address
1565 1570 // 12(rsp) - byte count, can be zero
1566 1571 //
1567 1572 // Output:
1568 1573 // rax, == 0 - success
1569 1574 // rax, == -1 - need to call System.arraycopy
1570 1575 //
1571 1576 // Examines the alignment of the operands and dispatches
1572 1577 // to a long, int, short, or byte copy loop.
1573 1578 //
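// Dispatch sketch in C: OR-ing both addresses and the byte count yields a
// word whose low bits give the coarsest alignment shared by all three.
//
//   uintptr_t bits = (uintptr_t)from | (uintptr_t)to | (uintptr_t)count;
//   if      ((bits & (BytesPerLong  - 1)) == 0) copy_jlongs (count >> LogBytesPerLong);
//   else if ((bits & (BytesPerInt   - 1)) == 0) copy_jints  (count >> LogBytesPerInt);
//   else if ((bits & (BytesPerShort - 1)) == 0) copy_jshorts(count >> LogBytesPerShort);
//   else                                        copy_jbytes (count);
//
// (copy_j* are stand-ins for the four entry points passed in.)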
1574 1579 address generate_unsafe_copy(const char *name,
1575 1580 address byte_copy_entry,
1576 1581 address short_copy_entry,
1577 1582 address int_copy_entry,
1578 1583 address long_copy_entry) {
1579 1584
1580 1585 Label L_long_aligned, L_int_aligned, L_short_aligned;
1581 1586
1582 1587 __ align(CodeEntryAlignment);
1583 1588 StubCodeMark mark(this, "StubRoutines", name);
1584 1589 address start = __ pc();
1585 1590
1586 1591 const Register from = rax; // source array address
1587 1592 const Register to = rdx; // destination array address
1588 1593 const Register count = rcx; // elements count
1589 1594
1590 1595 __ enter(); // required for proper stackwalking of RuntimeStub frame
1591 1596 __ push(rsi);
1592 1597 __ push(rdi);
1593 1598 Address from_arg(rsp, 12+ 4); // from
1594 1599 Address to_arg(rsp, 12+ 8); // to
1595 1600 Address count_arg(rsp, 12+12); // byte count
1596 1601
1597 1602 // Load up:
1598 1603 __ movptr(from , from_arg);
1599 1604 __ movptr(to , to_arg);
1600 1605 __ movl2ptr(count, count_arg);
1601 1606
1602 1607 // bump this on entry, not on exit:
1603 1608 inc_counter_np(SharedRuntime::_unsafe_array_copy_ctr);
1604 1609
1605 1610 const Register bits = rsi;
1606 1611 __ mov(bits, from);
1607 1612 __ orptr(bits, to);
1608 1613 __ orptr(bits, count);
1609 1614
1610 1615 __ testl(bits, BytesPerLong-1);
1611 1616 __ jccb(Assembler::zero, L_long_aligned);
1612 1617
1613 1618 __ testl(bits, BytesPerInt-1);
1614 1619 __ jccb(Assembler::zero, L_int_aligned);
1615 1620
1616 1621 __ testl(bits, BytesPerShort-1);
1617 1622 __ jump_cc(Assembler::notZero, RuntimeAddress(byte_copy_entry));
1618 1623
1619 1624 __ BIND(L_short_aligned);
1620 1625 __ shrptr(count, LogBytesPerShort); // size => short_count
1621 1626 __ movl(count_arg, count); // update 'count'
1622 1627 __ jump(RuntimeAddress(short_copy_entry));
1623 1628
1624 1629 __ BIND(L_int_aligned);
1625 1630 __ shrptr(count, LogBytesPerInt); // size => int_count
1626 1631 __ movl(count_arg, count); // update 'count'
1627 1632 __ jump(RuntimeAddress(int_copy_entry));
1628 1633
1629 1634 __ BIND(L_long_aligned);
1630 1635 __ shrptr(count, LogBytesPerLong); // size => qword_count
1631 1636 __ movl(count_arg, count); // update 'count'
1632 1637 __ pop(rdi); // Do pops here since jlong_arraycopy stub does not do it.
1633 1638 __ pop(rsi);
1634 1639 __ jump(RuntimeAddress(long_copy_entry));
1635 1640
1636 1641 return start;
1637 1642 }
1638 1643
1639 1644
1640 1645 // Perform range checks on the proposed arraycopy.
1641 1646 // Smashes src_pos and dst_pos. (Uses them up for temps.)
1642 1647 void arraycopy_range_checks(Register src,
1643 1648 Register src_pos,
1644 1649 Register dst,
1645 1650 Register dst_pos,
1646 1651 Address& length,
1647 1652 Label& L_failed) {
1648 1653 BLOCK_COMMENT("arraycopy_range_checks:");
1649 1654 const Register src_end = src_pos; // source array end position
1650 1655 const Register dst_end = dst_pos; // destination array end position
1651 1656 __ addl(src_end, length); // src_pos + length
1652 1657 __ addl(dst_end, length); // dst_pos + length
1653 1658
1654 1659 // if (src_pos + length > arrayOop(src)->length() ) FAIL;
1655 1660 __ cmpl(src_end, Address(src, arrayOopDesc::length_offset_in_bytes()));
1656 1661 __ jcc(Assembler::above, L_failed);
1657 1662
1658 1663 // if (dst_pos + length > arrayOop(dst)->length() ) FAIL;
1659 1664 __ cmpl(dst_end, Address(dst, arrayOopDesc::length_offset_in_bytes()));
1660 1665 __ jcc(Assembler::above, L_failed);
1661 1666
1662 1667 BLOCK_COMMENT("arraycopy_range_checks done");
1663 1668 }
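// C-level sketch of the two tests above (positions and length are already
// known to be non-negative at this point):
//
//   if ((juint)(src_pos + length) > (juint)arrayOop(src)->length()) goto L_failed;
//   if ((juint)(dst_pos + length) > (juint)arrayOop(dst)->length()) goto L_failed;
//
// The unsigned 'above' branch keeps the comparison exact even if the sum
// wraps a signed 32-bit int.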
1664 1669
1665 1670
1666 1671 //
1667 1672 // Generate generic array copy stubs
1668 1673 //
1669 1674 // Input:
1670 1675 // 4(rsp) - src oop
1671 1676 // 8(rsp) - src_pos
1672 1677 // 12(rsp) - dst oop
1673 1678 // 16(rsp) - dst_pos
1674 1679 // 20(rsp) - element count
1675 1680 //
1676 1681 // Output:
1677 1682 // rax, == 0 - success
1678 1683 // rax, == -1^K - failure, where K is partial transfer count
1679 1684 //
1680 1685 address generate_generic_copy(const char *name,
1681 1686 address entry_jbyte_arraycopy,
1682 1687 address entry_jshort_arraycopy,
1683 1688 address entry_jint_arraycopy,
1684 1689 address entry_oop_arraycopy,
1685 1690 address entry_jlong_arraycopy,
1686 1691 address entry_checkcast_arraycopy) {
1687 1692 Label L_failed, L_failed_0, L_objArray;
1688 1693
1689 1694 { int modulus = CodeEntryAlignment;
1690 1695 int target = modulus - 5; // 5 = sizeof jmp(L_failed)
1691 1696 int advance = target - (__ offset() % modulus);
1692 1697 if (advance < 0) advance += modulus;
1693 1698 if (advance > 0) __ nop(advance);
1694 1699 }
1695 1700 StubCodeMark mark(this, "StubRoutines", name);
1696 1701
1697 1702 // Short-hop target to L_failed. Makes for denser prologue code.
1698 1703 __ BIND(L_failed_0);
1699 1704 __ jmp(L_failed);
1700 1705 assert(__ offset() % CodeEntryAlignment == 0, "no further alignment needed");
1701 1706
1702 1707 __ align(CodeEntryAlignment);
1703 1708 address start = __ pc();
1704 1709
1705 1710 __ enter(); // required for proper stackwalking of RuntimeStub frame
1706 1711 __ push(rsi);
1707 1712 __ push(rdi);
1708 1713
1709 1714 // bump this on entry, not on exit:
1710 1715 inc_counter_np(SharedRuntime::_generic_array_copy_ctr);
1711 1716
1712 1717 // Input values
1713 1718 Address SRC (rsp, 12+ 4);
1714 1719 Address SRC_POS (rsp, 12+ 8);
1715 1720 Address DST (rsp, 12+12);
1716 1721 Address DST_POS (rsp, 12+16);
1717 1722 Address LENGTH (rsp, 12+20);
1718 1723
1719 1724 //-----------------------------------------------------------------------
1720 1725 // Assembler stub will be used for this call to arraycopy
1721 1726 // if the following conditions are met:
1722 1727 //
1723 1728 // (1) src and dst must not be null.
1724 1729 // (2) src_pos must not be negative.
1725 1730 // (3) dst_pos must not be negative.
1726 1731 // (4) length must not be negative.
1727 1732 // (5) src klass and dst klass should be the same and not NULL.
1728 1733 // (6) src and dst should be arrays.
1729 1734 // (7) src_pos + length must not exceed length of src.
1730 1735 // (8) dst_pos + length must not exceed length of dst.
1731 1736 //
1732 1737
1733 1738 const Register src = rax; // source array oop
1734 1739 const Register src_pos = rsi;
1735 1740 const Register dst = rdx; // destination array oop
1736 1741 const Register dst_pos = rdi;
1737 1742 const Register length = rcx; // transfer count
1738 1743
1739 1744 // if (src == NULL) return -1;
1740 1745 __ movptr(src, SRC); // src oop
1741 1746 __ testptr(src, src);
1742 1747 __ jccb(Assembler::zero, L_failed_0);
1743 1748
1744 1749 // if (src_pos < 0) return -1;
1745 1750 __ movl2ptr(src_pos, SRC_POS); // src_pos
1746 1751 __ testl(src_pos, src_pos);
1747 1752 __ jccb(Assembler::negative, L_failed_0);
1748 1753
1749 1754 // if (dst == NULL) return -1;
1750 1755 __ movptr(dst, DST); // dst oop
1751 1756 __ testptr(dst, dst);
1752 1757 __ jccb(Assembler::zero, L_failed_0);
1753 1758
1754 1759 // if (dst_pos < 0) return -1;
1755 1760 __ movl2ptr(dst_pos, DST_POS); // dst_pos
1756 1761 __ testl(dst_pos, dst_pos);
1757 1762 __ jccb(Assembler::negative, L_failed_0);
1758 1763
1759 1764 // if (length < 0) return -1;
1760 1765 __ movl2ptr(length, LENGTH); // length
1761 1766 __ testl(length, length);
1762 1767 __ jccb(Assembler::negative, L_failed_0);
1763 1768
1764 1769 // if (src->klass() == NULL) return -1;
1765 1770 Address src_klass_addr(src, oopDesc::klass_offset_in_bytes());
1766 1771 Address dst_klass_addr(dst, oopDesc::klass_offset_in_bytes());
1767 1772 const Register rcx_src_klass = rcx; // array klass
1768 1773 __ movptr(rcx_src_klass, Address(src, oopDesc::klass_offset_in_bytes()));
1769 1774
1770 1775 #ifdef ASSERT
1771 1776 // assert(src->klass() != NULL);
1772 1777 BLOCK_COMMENT("assert klasses not null");
1773 1778 { Label L1, L2;
1774 1779 __ testptr(rcx_src_klass, rcx_src_klass);
1775 1780 __ jccb(Assembler::notZero, L2); // it is broken if klass is NULL
1776 1781 __ bind(L1);
1777 1782 __ stop("broken null klass");
1778 1783 __ bind(L2);
1779 1784 __ cmpptr(dst_klass_addr, (int32_t)NULL_WORD);
1780 1785 __ jccb(Assembler::equal, L1); // this would be broken also
1781 1786 BLOCK_COMMENT("assert done");
1782 1787 }
1783 1788 #endif //ASSERT
1784 1789
1785 1790 // Load layout helper (32-bits)
1786 1791 //
1787 1792 // |array_tag| | header_size | element_type | |log2_element_size|
1788 1793 // 32 30 24 16 8 2 0
1789 1794 //
1790 1795 // array_tag: typeArray = 0x3, objArray = 0x2, non-array = 0x0
1791 1796 //
1792 1797
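// Field-extraction sketch for the layout helper diagrammed above, using the
// same Klass::_lh_* constants as the code below:
//
//   int header_size = ((jint)lh >> Klass::_lh_header_size_shift)
//                     & Klass::_lh_header_size_mask;        // bytes to first element
//   int log2_elsize = (jint)lh & Klass::_lh_log2_element_size_mask;
//
// Array-kind tests compare lh against precomputed values, e.g.
// Klass::array_layout_helper(T_OBJECT) for objArrays, as done below.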
1793 1798 int lh_offset = klassOopDesc::header_size() * HeapWordSize +
1794 1799 Klass::layout_helper_offset_in_bytes();
1795 1800 Address src_klass_lh_addr(rcx_src_klass, lh_offset);
1796 1801
1797 1802 // Handle objArrays completely differently...
1798 1803 jint objArray_lh = Klass::array_layout_helper(T_OBJECT);
1799 1804 __ cmpl(src_klass_lh_addr, objArray_lh);
1800 1805 __ jcc(Assembler::equal, L_objArray);
1801 1806
1802 1807 // if (src->klass() != dst->klass()) return -1;
1803 1808 __ cmpptr(rcx_src_klass, dst_klass_addr);
1804 1809 __ jccb(Assembler::notEqual, L_failed_0);
1805 1810
1806 1811 const Register rcx_lh = rcx; // layout helper
1807 1812 assert(rcx_lh == rcx_src_klass, "known alias");
1808 1813 __ movl(rcx_lh, src_klass_lh_addr);
1809 1814
1810 1815 // if (!src->is_Array()) return -1;
1811 1816 __ cmpl(rcx_lh, Klass::_lh_neutral_value);
1812 1817 __ jcc(Assembler::greaterEqual, L_failed_0); // signed cmp
1813 1818
1814 1819 // At this point, it is known to be a typeArray (array_tag 0x3).
1815 1820 #ifdef ASSERT
1816 1821 { Label L;
1817 1822 __ cmpl(rcx_lh, (Klass::_lh_array_tag_type_value << Klass::_lh_array_tag_shift));
1818 1823 __ jcc(Assembler::greaterEqual, L); // signed cmp
1819 1824 __ stop("must be a primitive array");
1820 1825 __ bind(L);
1821 1826 }
1822 1827 #endif
1823 1828
1824 1829 assert_different_registers(src, src_pos, dst, dst_pos, rcx_lh);
1825 1830 arraycopy_range_checks(src, src_pos, dst, dst_pos, LENGTH, L_failed);
1826 1831
1827 1832 // typeArrayKlass
1828 1833 //
1829 1834 // src_addr = (src + array_header_in_bytes()) + (src_pos << log2elemsize);
1830 1835 // dst_addr = (dst + array_header_in_bytes()) + (dst_pos << log2elemsize);
1831 1836 //
1832 1837 const Register rsi_offset = rsi; // array offset
1833 1838 const Register src_array = src; // src array offset
1834 1839 const Register dst_array = dst; // dst array offset
1835 1840 const Register rdi_elsize = rdi; // log2 element size
1836 1841
1837 1842 __ mov(rsi_offset, rcx_lh);
1838 1843 __ shrptr(rsi_offset, Klass::_lh_header_size_shift);
1839 1844 __ andptr(rsi_offset, Klass::_lh_header_size_mask); // array_offset
1840 1845 __ addptr(src_array, rsi_offset); // src array offset
1841 1846 __ addptr(dst_array, rsi_offset); // dst array offset
1842 1847 __ andptr(rcx_lh, Klass::_lh_log2_element_size_mask); // log2 elsize
1843 1848
1844 1849 // the following registers must be set before the jump to the corresponding stub
1845 1850 const Register from = src; // source array address
1846 1851 const Register to = dst; // destination array address
1847 1852 const Register count = rcx; // elements count
1848 1853 // some of them should be duplicated on stack
1849 1854 #define FROM Address(rsp, 12+ 4)
1850 1855 #define TO Address(rsp, 12+ 8) // Not used now
1851 1856 #define COUNT Address(rsp, 12+12) // Only for oop arraycopy
1852 1857
1853 1858 BLOCK_COMMENT("scale indexes to element size");
1854 1859 __ movl2ptr(rsi, SRC_POS); // src_pos
1855 1860 __ shlptr(rsi); // src_pos << rcx (log2 elsize)
1856 1861 assert(src_array == from, "");
1857 1862 __ addptr(from, rsi); // from = src_array + SRC_POS << log2 elsize
1858 1863 __ movl2ptr(rdi, DST_POS); // dst_pos
1859 1864 __ shlptr(rdi); // dst_pos << rcx (log2 elsize)
1860 1865 assert(dst_array == to, "");
1861 1866 __ addptr(to, rdi); // to = dst_array + DST_POS << log2 elsize
1862 1867 __ movptr(FROM, from); // src_addr
1863 1868 __ mov(rdi_elsize, rcx_lh); // log2 elsize
1864 1869 __ movl2ptr(count, LENGTH); // elements count
1865 1870
1866 1871 BLOCK_COMMENT("choose copy loop based on element size");
1867 1872 __ cmpl(rdi_elsize, 0);
1868 1873
1869 1874 __ jump_cc(Assembler::equal, RuntimeAddress(entry_jbyte_arraycopy));
1870 1875 __ cmpl(rdi_elsize, LogBytesPerShort);
1871 1876 __ jump_cc(Assembler::equal, RuntimeAddress(entry_jshort_arraycopy));
1872 1877 __ cmpl(rdi_elsize, LogBytesPerInt);
1873 1878 __ jump_cc(Assembler::equal, RuntimeAddress(entry_jint_arraycopy));
1874 1879 #ifdef ASSERT
1875 1880 __ cmpl(rdi_elsize, LogBytesPerLong);
1876 1881 __ jccb(Assembler::notEqual, L_failed);
1877 1882 #endif
1878 1883 __ pop(rdi); // Do pops here since jlong_arraycopy stub does not do it.
1879 1884 __ pop(rsi);
1880 1885 __ jump(RuntimeAddress(entry_jlong_arraycopy));
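// The element-size dispatch above is, in effect (sketch):
//
//   switch (log2_elsize) {
//     case 0:                goto entry_jbyte_arraycopy;
//     case LogBytesPerShort: goto entry_jshort_arraycopy;
//     case LogBytesPerInt:   goto entry_jint_arraycopy;
//     default:               goto entry_jlong_arraycopy;  // asserted == LogBytesPerLong
//   }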
1881 1886
1882 1887 __ BIND(L_failed);
1883 1888 __ xorptr(rax, rax);
1884 1889 __ notptr(rax); // return -1
1885 1890 __ pop(rdi);
1886 1891 __ pop(rsi);
1887 1892 __ leave(); // required for proper stackwalking of RuntimeStub frame
1888 1893 __ ret(0);
1889 1894
1890 1895 // objArrayKlass
1891 1896 __ BIND(L_objArray);
1892 1897 // live at this point: rcx_src_klass, src[_pos], dst[_pos]
1893 1898
1894 1899 Label L_plain_copy, L_checkcast_copy;
1895 1900 // test array classes for subtyping
1896 1901 __ cmpptr(rcx_src_klass, dst_klass_addr); // usual case is exact equality
1897 1902 __ jccb(Assembler::notEqual, L_checkcast_copy);
1898 1903
1899 1904 // Identically typed arrays can be copied without element-wise checks.
1900 1905 assert_different_registers(src, src_pos, dst, dst_pos, rcx_src_klass);
1901 1906 arraycopy_range_checks(src, src_pos, dst, dst_pos, LENGTH, L_failed);
1902 1907
1903 1908 __ BIND(L_plain_copy);
1904 1909 __ movl2ptr(count, LENGTH); // elements count
1905 1910 __ movl2ptr(src_pos, SRC_POS); // reload src_pos
1906 1911 __ lea(from, Address(src, src_pos, Address::times_ptr,
1907 1912 arrayOopDesc::base_offset_in_bytes(T_OBJECT))); // src_addr
1908 1913 __ movl2ptr(dst_pos, DST_POS); // reload dst_pos
1909 1914 __ lea(to, Address(dst, dst_pos, Address::times_ptr,
1910 1915 arrayOopDesc::base_offset_in_bytes(T_OBJECT))); // dst_addr
1911 1916 __ movptr(FROM, from); // src_addr
1912 1917 __ movptr(TO, to); // dst_addr
1913 1918 __ movl(COUNT, count); // count
1914 1919 __ jump(RuntimeAddress(entry_oop_arraycopy));
1915 1920
1916 1921 __ BIND(L_checkcast_copy);
1917 1922 // live at this point: rcx_src_klass, dst[_pos], src[_pos]
1918 1923 {
1919 1924 // Handy offsets:
1920 1925 int ek_offset = (klassOopDesc::header_size() * HeapWordSize +
1921 1926 objArrayKlass::element_klass_offset_in_bytes());
1922 1927 int sco_offset = (klassOopDesc::header_size() * HeapWordSize +
1923 1928 Klass::super_check_offset_offset_in_bytes());
1924 1929
1925 1930 Register rsi_dst_klass = rsi;
1926 1931 Register rdi_temp = rdi;
1927 1932 assert(rsi_dst_klass == src_pos, "expected alias w/ src_pos");
1928 1933 assert(rdi_temp == dst_pos, "expected alias w/ dst_pos");
1929 1934 Address dst_klass_lh_addr(rsi_dst_klass, lh_offset);
1930 1935
1931 1936 // Before looking at dst.length, make sure dst is also an objArray.
1932 1937 __ movptr(rsi_dst_klass, dst_klass_addr);
1933 1938 __ cmpl(dst_klass_lh_addr, objArray_lh);
1934 1939 __ jccb(Assembler::notEqual, L_failed);
1935 1940
1936 1941 // It is safe to examine both src.length and dst.length.
1937 1942 __ movl2ptr(src_pos, SRC_POS); // reload rsi
1938 1943 arraycopy_range_checks(src, src_pos, dst, dst_pos, LENGTH, L_failed);
1939 1944 // (Now src_pos and dst_pos are killed, but not src and dst.)
1940 1945
1941 1946 // We'll need this temp (don't forget to pop it after the type check).
1942 1947 __ push(rbx);
1943 1948 Register rbx_src_klass = rbx;
1944 1949
1945 1950 __ mov(rbx_src_klass, rcx_src_klass); // spill away from rcx
1946 1951 __ movptr(rsi_dst_klass, dst_klass_addr);
1947 1952 Address super_check_offset_addr(rsi_dst_klass, sco_offset);
1948 1953 Label L_fail_array_check;
1949 1954 generate_type_check(rbx_src_klass,
1950 1955 super_check_offset_addr, dst_klass_addr,
1951 1956 rdi_temp, NULL, &L_fail_array_check);
1952 1957 // (On fall-through, we have passed the array type check.)
1953 1958 __ pop(rbx);
1954 1959 __ jmp(L_plain_copy);
1955 1960
1956 1961 __ BIND(L_fail_array_check);
1957 1962 // Reshuffle arguments so we can call checkcast_arraycopy:
1958 1963
1959 1964 // match initial saves for checkcast_arraycopy
1960 1965 // push(rsi); // already done; see above
1961 1966 // push(rdi); // already done; see above
1962 1967 // push(rbx); // already done; see above
1963 1968
1964 1969 // Marshal outgoing arguments now, freeing registers.
1965 1970 Address from_arg(rsp, 16+ 4); // from
1966 1971 Address to_arg(rsp, 16+ 8); // to
1967 1972 Address length_arg(rsp, 16+12); // elements count
1968 1973 Address ckoff_arg(rsp, 16+16); // super_check_offset
1969 1974 Address ckval_arg(rsp, 16+20); // super_klass
1970 1975
1971 1976 Address SRC_POS_arg(rsp, 16+ 8);
1972 1977 Address DST_POS_arg(rsp, 16+16);
1973 1978 Address LENGTH_arg(rsp, 16+20);
1974 1979 // push rbx, changed the incoming offsets (why not just use rbp,??)
1975 1980 // assert(SRC_POS_arg.disp() == SRC_POS.disp() + 4, "");
1976 1981
1977 1982 __ movptr(rbx, Address(rsi_dst_klass, ek_offset));
1978 1983 __ movl2ptr(length, LENGTH_arg); // reload elements count
1979 1984 __ movl2ptr(src_pos, SRC_POS_arg); // reload src_pos
1980 1985 __ movl2ptr(dst_pos, DST_POS_arg); // reload dst_pos
1981 1986
1982 1987 __ movptr(ckval_arg, rbx); // destination element type
1983 1988 __ movl(rbx, Address(rbx, sco_offset));
1984 1989 __ movl(ckoff_arg, rbx); // corresponding class check offset
1985 1990
1986 1991 __ movl(length_arg, length); // outgoing length argument
1987 1992
1988 1993 __ lea(from, Address(src, src_pos, Address::times_ptr,
1989 1994 arrayOopDesc::base_offset_in_bytes(T_OBJECT)));
1990 1995 __ movptr(from_arg, from);
1991 1996
1992 1997 __ lea(to, Address(dst, dst_pos, Address::times_ptr,
1993 1998 arrayOopDesc::base_offset_in_bytes(T_OBJECT)));
1994 1999 __ movptr(to_arg, to);
1995 2000 __ jump(RuntimeAddress(entry_checkcast_arraycopy));
1996 2001 }
1997 2002
1998 2003 return start;
1999 2004 }
2000 2005
2001 2006 void generate_arraycopy_stubs() {
2002 2007 address entry;
2003 2008 address entry_jbyte_arraycopy;
2004 2009 address entry_jshort_arraycopy;
2005 2010 address entry_jint_arraycopy;
2006 2011 address entry_oop_arraycopy;
2007 2012 address entry_jlong_arraycopy;
2008 2013 address entry_checkcast_arraycopy;
2009 2014
2010 2015 StubRoutines::_arrayof_jbyte_disjoint_arraycopy =
2011 2016 generate_disjoint_copy(T_BYTE, true, Address::times_1, &entry,
2012 2017 "arrayof_jbyte_disjoint_arraycopy");
2013 2018 StubRoutines::_arrayof_jbyte_arraycopy =
2014 2019 generate_conjoint_copy(T_BYTE, true, Address::times_1, entry,
2015 2020 NULL, "arrayof_jbyte_arraycopy");
2016 2021 StubRoutines::_jbyte_disjoint_arraycopy =
2017 2022 generate_disjoint_copy(T_BYTE, false, Address::times_1, &entry,
2018 2023 "jbyte_disjoint_arraycopy");
2019 2024 StubRoutines::_jbyte_arraycopy =
2020 2025 generate_conjoint_copy(T_BYTE, false, Address::times_1, entry,
2021 2026 &entry_jbyte_arraycopy, "jbyte_arraycopy");
2022 2027
2023 2028 StubRoutines::_arrayof_jshort_disjoint_arraycopy =
2024 2029 generate_disjoint_copy(T_SHORT, true, Address::times_2, &entry,
2025 2030 "arrayof_jshort_disjoint_arraycopy");
2026 2031 StubRoutines::_arrayof_jshort_arraycopy =
2027 2032 generate_conjoint_copy(T_SHORT, true, Address::times_2, entry,
2028 2033 NULL, "arrayof_jshort_arraycopy");
2029 2034 StubRoutines::_jshort_disjoint_arraycopy =
2030 2035 generate_disjoint_copy(T_SHORT, false, Address::times_2, &entry,
2031 2036 "jshort_disjoint_arraycopy");
2032 2037 StubRoutines::_jshort_arraycopy =
2033 2038 generate_conjoint_copy(T_SHORT, false, Address::times_2, entry,
2034 2039 &entry_jshort_arraycopy, "jshort_arraycopy");
2035 2040
2036 2041 // The following arrays are always aligned on at least 4 bytes.
2037 2042 StubRoutines::_jint_disjoint_arraycopy =
2038 2043 generate_disjoint_copy(T_INT, true, Address::times_4, &entry,
2039 2044 "jint_disjoint_arraycopy");
2040 2045 StubRoutines::_jint_arraycopy =
2041 2046 generate_conjoint_copy(T_INT, true, Address::times_4, entry,
2042 2047 &entry_jint_arraycopy, "jint_arraycopy");
2043 2048
2044 2049 StubRoutines::_oop_disjoint_arraycopy =
2045 2050 generate_disjoint_copy(T_OBJECT, true, Address::times_ptr, &entry,
2046 2051 "oop_disjoint_arraycopy");
2047 2052 StubRoutines::_oop_arraycopy =
2048 2053 generate_conjoint_copy(T_OBJECT, true, Address::times_ptr, entry,
2049 2054 &entry_oop_arraycopy, "oop_arraycopy");
2050 2055
2051 2056 StubRoutines::_jlong_disjoint_arraycopy =
2052 2057 generate_disjoint_long_copy(&entry, "jlong_disjoint_arraycopy");
2053 2058 StubRoutines::_jlong_arraycopy =
2054 2059 generate_conjoint_long_copy(entry, &entry_jlong_arraycopy,
2055 2060 "jlong_arraycopy");
2056 2061
2057 2062 StubRoutines::_jbyte_fill = generate_fill(T_BYTE, false, "jbyte_fill");
2058 2063 StubRoutines::_jshort_fill = generate_fill(T_SHORT, false, "jshort_fill");
2059 2064 StubRoutines::_jint_fill = generate_fill(T_INT, false, "jint_fill");
2060 2065 StubRoutines::_arrayof_jbyte_fill = generate_fill(T_BYTE, true, "arrayof_jbyte_fill");
2061 2066 StubRoutines::_arrayof_jshort_fill = generate_fill(T_SHORT, true, "arrayof_jshort_fill");
2062 2067 StubRoutines::_arrayof_jint_fill = generate_fill(T_INT, true, "arrayof_jint_fill");
2063 2068
2064 2069 StubRoutines::_arrayof_jint_disjoint_arraycopy =
2065 2070 StubRoutines::_jint_disjoint_arraycopy;
2066 2071 StubRoutines::_arrayof_oop_disjoint_arraycopy =
2067 2072 StubRoutines::_oop_disjoint_arraycopy;
2068 2073 StubRoutines::_arrayof_jlong_disjoint_arraycopy =
2069 2074 StubRoutines::_jlong_disjoint_arraycopy;
2070 2075
2071 2076 StubRoutines::_arrayof_jint_arraycopy = StubRoutines::_jint_arraycopy;
2072 2077 StubRoutines::_arrayof_oop_arraycopy = StubRoutines::_oop_arraycopy;
2073 2078 StubRoutines::_arrayof_jlong_arraycopy = StubRoutines::_jlong_arraycopy;
2074 2079
2075 2080 StubRoutines::_checkcast_arraycopy =
2076 2081 generate_checkcast_copy("checkcast_arraycopy",
2077 2082 &entry_checkcast_arraycopy);
2078 2083
2079 2084 StubRoutines::_unsafe_arraycopy =
2080 2085 generate_unsafe_copy("unsafe_arraycopy",
2081 2086 entry_jbyte_arraycopy,
2082 2087 entry_jshort_arraycopy,
2083 2088 entry_jint_arraycopy,
2084 2089 entry_jlong_arraycopy);
2085 2090
2086 2091 StubRoutines::_generic_arraycopy =
2087 2092 generate_generic_copy("generic_arraycopy",
2088 2093 entry_jbyte_arraycopy,
2089 2094 entry_jshort_arraycopy,
2090 2095 entry_jint_arraycopy,
2091 2096 entry_oop_arraycopy,
2092 2097 entry_jlong_arraycopy,
2093 2098 entry_checkcast_arraycopy);
2094 2099 }
2095 2100
2096 2101 void generate_math_stubs() {
2097 2102 {
2098 2103 StubCodeMark mark(this, "StubRoutines", "log");
2099 2104 StubRoutines::_intrinsic_log = (double (*)(double)) __ pc();
2100 2105
2101 2106 __ fld_d(Address(rsp, 4));
2102 2107 __ flog();
2103 2108 __ ret(0);
2104 2109 }
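// Calling-convention sketch for these raw stubs: under the 32-bit C ABI for
// double (*)(double), the argument sits at 4(rsp), just past the return
// address, and the result is returned in ST(0), so fld_d/<op>/ret suffices.
// Hypothetical use through the assigned pointer:
//
//   double r = StubRoutines::_intrinsic_log(2.0);   // natural log, ~0.693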
2105 2110 {
2106 2111 StubCodeMark mark(this, "StubRoutines", "log10");
2107 2112 StubRoutines::_intrinsic_log10 = (double (*)(double)) __ pc();
2108 2113
2109 2114 __ fld_d(Address(rsp, 4));
2110 2115 __ flog10();
2111 2116 __ ret(0);
2112 2117 }
2113 2118 {
2114 2119 StubCodeMark mark(this, "StubRoutines", "sin");
2115 2120 StubRoutines::_intrinsic_sin = (double (*)(double)) __ pc();
2116 2121
2117 2122 __ fld_d(Address(rsp, 4));
2118 2123 __ trigfunc('s');
2119 2124 __ ret(0);
2120 2125 }
2121 2126 {
2122 2127 StubCodeMark mark(this, "StubRoutines", "cos");
2123 2128 StubRoutines::_intrinsic_cos = (double (*)(double)) __ pc();
2124 2129
2125 2130 __ fld_d(Address(rsp, 4));
2126 2131 __ trigfunc('c');
2127 2132 __ ret(0);
2128 2133 }
2129 2134 {
2130 2135 StubCodeMark mark(this, "StubRoutines", "tan");
2131 2136 StubRoutines::_intrinsic_tan = (double (*)(double)) __ pc();
2132 2137
2133 2138 __ fld_d(Address(rsp, 4));
2134 2139 __ trigfunc('t');
2135 2140 __ ret(0);
2136 2141 }
2137 2142
2138 2143 // The intrinsic versions of these seem to return the same values as
2139 2144 // the strict versions.
2140 2145 StubRoutines::_intrinsic_exp = SharedRuntime::dexp;
2141 2146 StubRoutines::_intrinsic_pow = SharedRuntime::dpow;
2142 2147 }
2143 2148
2144 2149 public:
2145 2150 // Information about frame layout at time of blocking runtime call.
2146 2151 // Note that we only have to preserve callee-saved registers since
2147 2152 // the compilers are responsible for supplying a continuation point
2148 2153 // if they expect all registers to be preserved.
2149 2154 enum layout {
2150 2155 thread_off, // last_java_sp
2151 2156 rbp_off, // callee saved register
2152 2157 ret_pc,
2153 2158 framesize
2154 2159 };
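// Stack picture implied by this layout once the frame is set up (sketch):
//
//   rsp + thread_off*wordSize : outgoing JavaThread* (also last_java_sp)
//   rsp + rbp_off*wordSize    : saved rbp from enter()
//   rsp + ret_pc*wordSize     : return address pushed by the call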
2155 2160
2156 2161 private:
2157 2162
2158 2163 #undef __
2159 2164 #define __ masm->
2160 2165
2161 2166 //------------------------------------------------------------------------------------------------------------------------
2162 2167 // Continuation point for throwing of implicit exceptions that are not handled in
2163 2168 // the current activation. Fabricates an exception oop and initiates normal
2164 2169 // exception dispatching in this frame.
2165 2170 //
2166 2171 // Previously the compiler (c2) allowed for callee save registers on Java calls.
2167 2172 // This is no longer true after adapter frames were removed but could possibly
2168 2173 // be brought back in the future if the interpreter code was reworked and it
2169 2174 // was deemed worthwhile. The comment below was left to describe what must
2170 2175 // happen here if callee saves were resurrected. As it stands now this stub
2171 2176 // could actually be a vanilla BufferBlob and have no oopMap at all.
2172 2177 // Since it doesn't make much difference we've chosen to leave it the
2173 2178 // way it was in the callee save days and keep the comment.
2174 2179
2175 2180 // If we need to preserve callee-saved values we need a callee-saved oop map and
2176 2181 // therefore have to make these stubs into RuntimeStubs rather than BufferBlobs.
2177 2182 // If the compiler needs all registers to be preserved between the fault
2178 2183 // point and the exception handler then it must assume responsibility for that in
2179 2184 // AbstractCompiler::continuation_for_implicit_null_exception or
2180 2185 // continuation_for_implicit_division_by_zero_exception. All other implicit
2181 2186 // exceptions (e.g., NullPointerException or AbstractMethodError on entry) are
2182 2187 // either at call sites or otherwise assume that stack unwinding will be initiated,
2183 2188 // so caller saved registers were assumed volatile in the compiler.
2184 2189 address generate_throw_exception(const char* name, address runtime_entry,
2185 2190 bool restore_saved_exception_pc) {
2186 2191
2187 2192 int insts_size = 256;
2188 2193 int locs_size = 32;
2189 2194
2190 2195 CodeBuffer code(name, insts_size, locs_size);
2191 2196 OopMapSet* oop_maps = new OopMapSet();
2192 2197 MacroAssembler* masm = new MacroAssembler(&code);
2193 2198
2194 2199 address start = __ pc();
2195 2200
2196 2201 // This is an inlined and slightly modified version of call_VM
2197 2202 // which has the ability to fetch the return PC out of
2198 2203 // thread-local storage and also sets up last_Java_sp slightly
2199 2204 // differently than the real call_VM
2200 2205 Register java_thread = rbx;
2201 2206 __ get_thread(java_thread);
2202 2207 if (restore_saved_exception_pc) {
2203 2208 __ movptr(rax, Address(java_thread, in_bytes(JavaThread::saved_exception_pc_offset())));
2204 2209 __ push(rax);
2205 2210 }
2206 2211
2207 2212 __ enter(); // required for proper stackwalking of RuntimeStub frame
2208 2213
2209 2214 // pc and rbp, already pushed
2210 2215 __ subptr(rsp, (framesize-2) * wordSize); // prolog
2211 2216
2212 2217 // Frame is now completed as far as size and linkage.
2213 2218
2214 2219 int frame_complete = __ pc() - start;
2215 2220
2216 2221 // push java thread (becomes first argument of C function)
2217 2222 __ movptr(Address(rsp, thread_off * wordSize), java_thread);
2218 2223
2219 2224 // Set up last_Java_sp and last_Java_fp
2220 2225 __ set_last_Java_frame(java_thread, rsp, rbp, NULL);
2221 2226
2222 2227 // Call runtime
2223 2228 BLOCK_COMMENT("call runtime_entry");
2224 2229 __ call(RuntimeAddress(runtime_entry));
2225 2230 // Generate oop map
2226 2231 OopMap* map = new OopMap(framesize, 0);
2227 2232 oop_maps->add_gc_map(__ pc() - start, map);
2228 2233
2229 2234 // restore the thread (cannot use the pushed argument since arguments
2230 2235 // may be overwritten by C code generated by an optimizing compiler);
2231 2236 // however, we can use the register value directly if it is callee-saved.
2232 2237 __ get_thread(java_thread);
2233 2238
2234 2239 __ reset_last_Java_frame(java_thread, true, false);
2235 2240
2236 2241 __ leave(); // required for proper stackwalking of RuntimeStub frame
2237 2242
2238 2243 // check for pending exceptions
2239 2244 #ifdef ASSERT
2240 2245 Label L;
2241 2246 __ cmpptr(Address(java_thread, Thread::pending_exception_offset()), (int32_t)NULL_WORD);
2242 2247 __ jcc(Assembler::notEqual, L);
2243 2248 __ should_not_reach_here();
2244 2249 __ bind(L);
2245 2250 #endif /* ASSERT */
2246 2251 __ jump(RuntimeAddress(StubRoutines::forward_exception_entry()));
2247 2252
2248 2253
2249 2254 RuntimeStub* stub = RuntimeStub::new_runtime_stub(name, &code, frame_complete, framesize, oop_maps, false);
2250 2255 return stub->entry_point();
2251 2256 }
2252 2257
2253 2258
2254 2259 void create_control_words() {
2255 2260 // Round to nearest, 53-bit mode, exceptions masked
2256 2261 StubRoutines::_fpu_cntrl_wrd_std = 0x027F;
2257 2262 // Round to zero, 53-bit mode, exceptions masked
2258 2263 StubRoutines::_fpu_cntrl_wrd_trunc = 0x0D7F;
2259 2264 // Round to nearest, 24-bit mode, exceptions masked
2260 2265 StubRoutines::_fpu_cntrl_wrd_24 = 0x007F;
2261 2266 // Round to nearest, 64-bit mode, exceptions masked
2262 2267 StubRoutines::_fpu_cntrl_wrd_64 = 0x037F;
2263 2268 // Round to nearest, exceptions masked (MXCSR; no precision field)
2264 2269 StubRoutines::_mxcsr_std = 0x1F80;
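// Bit-level sketch of the two "round to nearest" words above:
//
//   x87 0x027F:   exception masks (bits 0-5) all set, PC (bits 9:8) = 10
//                 for 53-bit precision, RC (bits 11:10) = 00 for nearest.
//   MXCSR 0x1F80: the six SSE exception mask bits (7-12) set,
//                 RC (bits 14:13) = 00 for nearest.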
2265 2270 // Note: the following two constants are 80-bit values
2266 2271 // layout is critical for correct loading by FPU.
2267 2272 // Bias for strict fp multiply/divide
2268 2273 StubRoutines::_fpu_subnormal_bias1[0]= 0x00000000; // 2^(-15360) == 0x03ff 8000 0000 0000 0000
2269 2274 StubRoutines::_fpu_subnormal_bias1[1]= 0x80000000;
2270 2275 StubRoutines::_fpu_subnormal_bias1[2]= 0x03ff;
2271 2276 // Un-Bias for strict fp multiply/divide
2272 2277 StubRoutines::_fpu_subnormal_bias2[0]= 0x00000000; // 2^(+15360) == 0x7bff 8000 0000 0000 0000
2273 2278 StubRoutines::_fpu_subnormal_bias2[1]= 0x80000000;
2274 2279 StubRoutines::_fpu_subnormal_bias2[2]= 0x7bff;
2275 2280 }
2276 2281
2277 2282 //---------------------------------------------------------------------------
2278 2283 // Initialization
2279 2284
2280 2285 void generate_initial() {
2281 2286 // Generates all stubs and initializes the entry points
2282 2287
2283 2288 //------------------------------------------------------------------------------------------------------------------------
2284 2289 // entry points that exist in all platforms
2285 2290 // Note: This is code that could be shared among different platforms - however the benefit seems to be smaller than
2286 2291 // the disadvantage of having a much more complicated generator structure. See also comment in stubRoutines.hpp.
2287 2292 StubRoutines::_forward_exception_entry = generate_forward_exception();
2288 2293
2289 2294 StubRoutines::_call_stub_entry =
2290 2295 generate_call_stub(StubRoutines::_call_stub_return_address);
2291 2296 // is referenced by megamorphic call
2292 2297 StubRoutines::_catch_exception_entry = generate_catch_exception();
2293 2298
2294 2299 // These are currently used by Solaris/Intel
2295 2300 StubRoutines::_atomic_xchg_entry = generate_atomic_xchg();
2296 2301
2297 2302 StubRoutines::_handler_for_unsafe_access_entry =
2298 2303 generate_handler_for_unsafe_access();
2299 2304
2300 2305 // platform dependent
2301 2306 create_control_words();
2302 2307
2303 2308 StubRoutines::x86::_verify_mxcsr_entry = generate_verify_mxcsr();
2304 2309 StubRoutines::x86::_verify_fpu_cntrl_wrd_entry = generate_verify_fpu_cntrl_wrd();
2305 2310 StubRoutines::_d2i_wrapper = generate_d2i_wrapper(T_INT,
2306 2311 CAST_FROM_FN_PTR(address, SharedRuntime::d2i));
2307 2312 StubRoutines::_d2l_wrapper = generate_d2i_wrapper(T_LONG,
2308 2313 CAST_FROM_FN_PTR(address, SharedRuntime::d2l));
2309 2314 }
2310 2315
2311 2316
2312 2317 void generate_all() {
2313 2318 // Generates all stubs and initializes the entry points
2314 2319
2315 2320 // These entry points require SharedInfo::stack0 to be set up in non-core builds
2316 2321 // and need to be relocatable, so they each fabricate a RuntimeStub internally.
2317 2322 StubRoutines::_throw_AbstractMethodError_entry = generate_throw_exception("AbstractMethodError throw_exception", CAST_FROM_FN_PTR(address, SharedRuntime::throw_AbstractMethodError), false);
2318 2323 StubRoutines::_throw_IncompatibleClassChangeError_entry= generate_throw_exception("IncompatibleClassChangeError throw_exception", CAST_FROM_FN_PTR(address, SharedRuntime::throw_IncompatibleClassChangeError), false);
2319 2324 StubRoutines::_throw_ArithmeticException_entry = generate_throw_exception("ArithmeticException throw_exception", CAST_FROM_FN_PTR(address, SharedRuntime::throw_ArithmeticException), true);
2320 2325 StubRoutines::_throw_NullPointerException_entry = generate_throw_exception("NullPointerException throw_exception", CAST_FROM_FN_PTR(address, SharedRuntime::throw_NullPointerException), true);
2321 2326 StubRoutines::_throw_NullPointerException_at_call_entry= generate_throw_exception("NullPointerException at call throw_exception", CAST_FROM_FN_PTR(address, SharedRuntime::throw_NullPointerException_at_call), false);
2322 2327 StubRoutines::_throw_StackOverflowError_entry = generate_throw_exception("StackOverflowError throw_exception", CAST_FROM_FN_PTR(address, SharedRuntime::throw_StackOverflowError), false);
2323 2328
2324 2329 //------------------------------------------------------------------------------------------------------------------------
2325 2330 // entry points that are platform specific
2326 2331
2327 2332 // support for verify_oop (must happen after universe_init)
2328 2333 StubRoutines::_verify_oop_subroutine_entry = generate_verify_oop();
2329 2334
2330 2335 // arraycopy stubs used by compilers
2331 2336 generate_arraycopy_stubs();
2332 2337
2333 2338 generate_math_stubs();
2334 2339 }
2335 2340
2336 2341
2337 2342 public:
2338 2343 StubGenerator(CodeBuffer* code, bool all) : StubCodeGenerator(code) {
2339 2344 if (all) {
2340 2345 generate_all();
2341 2346 } else {
2342 2347 generate_initial();
2343 2348 }
2344 2349 }
2345 2350 }; // end class declaration
2346 2351
2347 2352
2348 2353 void StubGenerator_generate(CodeBuffer* code, bool all) {
2349 2354 StubGenerator g(code, all);
2350 2355 }