rev 4534 : 8010927: Kitchensink crashed with SIGSEGV, Problematic frame: v ~StubRoutines::checkcast_arraycopy
Summary: Changed gen_write_ref_array_post_barrier() code on x64 to pass start address and number of copied oop elements. In generate_checkcast_copy() skip post barrier code if no elements are copied.
Reviewed-by: roland
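
In outline, the fix changes the post-barrier interface to take a start
address plus an element count, and guards the barrier call; a minimal
C-style sketch of the new flow (the function and parameter names here are
illustrative, not the stub code, which is macro-assembler):

    #include <cstddef>
    #include <cstdint>

    // New contract: destination start plus number of copied oops,
    // instead of start plus inclusive end address.
    void write_ref_array_post_barrier(uintptr_t start, size_t count);

    // generate_checkcast_copy() now skips the barrier entirely when
    // nothing was copied, the case that crashed before (8010927).
    void checkcast_copy_epilogue(uintptr_t dst_start, size_t copied_oops) {
      if (copied_oops == 0) return;
      write_ref_array_post_barrier(dst_start, copied_oops);
    }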
--- old/src/cpu/x86/vm/stubGenerator_x86_64.cpp
+++ new/src/cpu/x86/vm/stubGenerator_x86_64.cpp
1 1 /*
2 2 * Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved.
3 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 4 *
5 5 * This code is free software; you can redistribute it and/or modify it
6 6 * under the terms of the GNU General Public License version 2 only, as
7 7 * published by the Free Software Foundation.
8 8 *
9 9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 12 * version 2 for more details (a copy is included in the LICENSE file that
13 13 * accompanied this code).
14 14 *
15 15 * You should have received a copy of the GNU General Public License version
16 16 * 2 along with this work; if not, write to the Free Software Foundation,
17 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 18 *
19 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 20 * or visit www.oracle.com if you need additional information or have any
21 21 * questions.
22 22 *
23 23 */
24 24
25 25 #include "precompiled.hpp"
26 26 #include "asm/assembler.hpp"
27 27 #include "assembler_x86.inline.hpp"
28 28 #include "interpreter/interpreter.hpp"
29 29 #include "nativeInst_x86.hpp"
30 30 #include "oops/instanceOop.hpp"
31 31 #include "oops/methodOop.hpp"
32 32 #include "oops/objArrayKlass.hpp"
33 33 #include "oops/oop.inline.hpp"
34 34 #include "prims/methodHandles.hpp"
35 35 #include "runtime/frame.inline.hpp"
36 36 #include "runtime/handles.inline.hpp"
37 37 #include "runtime/sharedRuntime.hpp"
38 38 #include "runtime/stubCodeGenerator.hpp"
39 39 #include "runtime/stubRoutines.hpp"
40 40 #include "utilities/top.hpp"
41 41 #ifdef TARGET_OS_FAMILY_linux
42 42 # include "thread_linux.inline.hpp"
43 43 #endif
44 44 #ifdef TARGET_OS_FAMILY_solaris
45 45 # include "thread_solaris.inline.hpp"
46 46 #endif
47 47 #ifdef TARGET_OS_FAMILY_windows
48 48 # include "thread_windows.inline.hpp"
49 49 #endif
50 50 #ifdef TARGET_OS_FAMILY_bsd
51 51 # include "thread_bsd.inline.hpp"
52 52 #endif
53 53 #ifdef COMPILER2
54 54 #include "opto/runtime.hpp"
55 55 #endif
56 56
57 57 // Declaration and definition of StubGenerator (no .hpp file).
58 58 // For a more detailed description of the stub routine structure
59 59 // see the comment in stubRoutines.hpp
60 60
61 61 #define __ _masm->
62 62 #define TIMES_OOP (UseCompressedOops ? Address::times_4 : Address::times_8)
63 63 #define a__ ((Assembler*)_masm)->
64 64
65 65 #ifdef PRODUCT
66 66 #define BLOCK_COMMENT(str) /* nothing */
67 67 #else
68 68 #define BLOCK_COMMENT(str) __ block_comment(str)
69 69 #endif
70 70
71 71 #define BIND(label) bind(label); BLOCK_COMMENT(#label ":")
72 72 const int MXCSR_MASK = 0xFFC0; // Mask out any pending exceptions
73 73
74 74 // Stub Code definitions
75 75
76 76 static address handle_unsafe_access() {
77 77 JavaThread* thread = JavaThread::current();
78 78 address pc = thread->saved_exception_pc();
79 79 // pc is the instruction which we must emulate
80 80 // doing a no-op is fine: return garbage from the load
81 81 // therefore, compute npc
82 82 address npc = Assembler::locate_next_instruction(pc);
83 83
84 84 // request an async exception
85 85 thread->set_pending_unsafe_access_error();
86 86
87 87 // return address of next instruction to execute
88 88 return npc;
89 89 }
90 90
91 91 class StubGenerator: public StubCodeGenerator {
92 92 private:
93 93
94 94 #ifdef PRODUCT
95 95 #define inc_counter_np(counter) (0)
96 96 #else
97 97 void inc_counter_np_(int& counter) {
98 98 // This can destroy rscratch1 if counter is far from the code cache
99 99 __ incrementl(ExternalAddress((address)&counter));
100 100 }
101 101 #define inc_counter_np(counter) \
102 102 BLOCK_COMMENT("inc_counter " #counter); \
103 103 inc_counter_np_(counter);
104 104 #endif
105 105
106 106 // Call stubs are used to call Java from C
107 107 //
108 108 // Linux Arguments:
109 109 // c_rarg0: call wrapper address address
110 110 // c_rarg1: result address
111 111 // c_rarg2: result type BasicType
112 112 // c_rarg3: method methodOop
113 113 // c_rarg4: (interpreter) entry point address
114 114 // c_rarg5: parameters intptr_t*
115 115 // 16(rbp): parameter size (in words) int
116 116 // 24(rbp): thread Thread*
117 117 //
118 118 // [ return_from_Java ] <--- rsp
119 119 // [ argument word n ]
120 120 // ...
121 121 // -12 [ argument word 1 ]
122 122 // -11 [ saved r15 ] <--- rsp_after_call
123 123 // -10 [ saved r14 ]
124 124 // -9 [ saved r13 ]
125 125 // -8 [ saved r12 ]
126 126 // -7 [ saved rbx ]
127 127 // -6 [ call wrapper ]
128 128 // -5 [ result ]
129 129 // -4 [ result type ]
130 130 // -3 [ method ]
131 131 // -2 [ entry point ]
132 132 // -1 [ parameters ]
133 133 // 0 [ saved rbp ] <--- rbp
134 134 // 1 [ return address ]
135 135 // 2 [ parameter size ]
136 136 // 3 [ thread ]
137 137 //
138 138 // Windows Arguments:
139 139 // c_rarg0: call wrapper address address
140 140 // c_rarg1: result address
141 141 // c_rarg2: result type BasicType
142 142 // c_rarg3: method methodOop
143 143 // 48(rbp): (interpreter) entry point address
144 144 // 56(rbp): parameters intptr_t*
145 145 // 64(rbp): parameter size (in words) int
146 146 // 72(rbp): thread Thread*
147 147 //
148 148 // [ return_from_Java ] <--- rsp
149 149 // [ argument word n ]
150 150 // ...
151 151 // -28 [ argument word 1 ]
152 152 // -27 [ saved xmm15 ] <--- rsp_after_call
153 153 // [ saved xmm7-xmm14 ]
154 154 // -9 [ saved xmm6 ] (each xmm register takes 2 slots)
155 155 // -7 [ saved r15 ]
156 156 // -6 [ saved r14 ]
157 157 // -5 [ saved r13 ]
158 158 // -4 [ saved r12 ]
159 159 // -3 [ saved rdi ]
160 160 // -2 [ saved rsi ]
161 161 // -1 [ saved rbx ]
162 162 // 0 [ saved rbp ] <--- rbp
163 163 // 1 [ return address ]
164 164 // 2 [ call wrapper ]
165 165 // 3 [ result ]
166 166 // 4 [ result type ]
167 167 // 5 [ method ]
168 168 // 6 [ entry point ]
169 169 // 7 [ parameters ]
170 170 // 8 [ parameter size ]
171 171 // 9 [ thread ]
172 172 //
173 173 // Windows reserves the caller's stack space for arguments 1-4.
174 174 // We spill c_rarg0-c_rarg3 to this space.
175 175
176 176 // Call stub stack layout word offsets from rbp
177 177 enum call_stub_layout {
178 178 #ifdef _WIN64
179 179 xmm_save_first = 6, // save from xmm6
180 180 xmm_save_last = 15, // to xmm15
181 181 xmm_save_base = -9,
182 182 rsp_after_call_off = xmm_save_base - 2 * (xmm_save_last - xmm_save_first), // -27
183 183 r15_off = -7,
184 184 r14_off = -6,
185 185 r13_off = -5,
186 186 r12_off = -4,
187 187 rdi_off = -3,
188 188 rsi_off = -2,
189 189 rbx_off = -1,
190 190 rbp_off = 0,
191 191 retaddr_off = 1,
192 192 call_wrapper_off = 2,
193 193 result_off = 3,
194 194 result_type_off = 4,
195 195 method_off = 5,
196 196 entry_point_off = 6,
197 197 parameters_off = 7,
198 198 parameter_size_off = 8,
199 199 thread_off = 9
200 200 #else
201 201 rsp_after_call_off = -12,
202 202 mxcsr_off = rsp_after_call_off,
203 203 r15_off = -11,
204 204 r14_off = -10,
205 205 r13_off = -9,
206 206 r12_off = -8,
207 207 rbx_off = -7,
208 208 call_wrapper_off = -6,
209 209 result_off = -5,
210 210 result_type_off = -4,
211 211 method_off = -3,
212 212 entry_point_off = -2,
213 213 parameters_off = -1,
214 214 rbp_off = 0,
215 215 retaddr_off = 1,
216 216 parameter_size_off = 2,
217 217 thread_off = 3
218 218 #endif
219 219 };
220 220
221 221 #ifdef _WIN64
222 222 Address xmm_save(int reg) {
223 223 assert(reg >= xmm_save_first && reg <= xmm_save_last, "XMM register number out of range");
224 224 return Address(rbp, (xmm_save_base - (reg - xmm_save_first) * 2) * wordSize);
225 225 }
226 226 #endif
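
A quick standalone check of the slot arithmetic above (plain C++; the
enumerator names are local to this sketch):

    // Each xmm register takes two slots, so saving xmm6..xmm15 downward
    // from slot -9 puts xmm15 at slot -27, i.e. rsp_after_call_off.
    enum { chk_first = 6, chk_last = 15, chk_base = -9 };
    static_assert(chk_base - 2 * (chk_last - chk_first) == -27,
                  "xmm15 save slot coincides with rsp_after_call_off");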
227 227
228 228 address generate_call_stub(address& return_address) {
229 229 assert((int)frame::entry_frame_after_call_words == -(int)rsp_after_call_off + 1 &&
230 230 (int)frame::entry_frame_call_wrapper_offset == (int)call_wrapper_off,
231 231 "adjust this code");
232 232 StubCodeMark mark(this, "StubRoutines", "call_stub");
233 233 address start = __ pc();
234 234
235 235 // same as in generate_catch_exception()!
236 236 const Address rsp_after_call(rbp, rsp_after_call_off * wordSize);
237 237
238 238 const Address call_wrapper (rbp, call_wrapper_off * wordSize);
239 239 const Address result (rbp, result_off * wordSize);
240 240 const Address result_type (rbp, result_type_off * wordSize);
241 241 const Address method (rbp, method_off * wordSize);
242 242 const Address entry_point (rbp, entry_point_off * wordSize);
243 243 const Address parameters (rbp, parameters_off * wordSize);
244 244 const Address parameter_size(rbp, parameter_size_off * wordSize);
245 245
246 246 // same as in generate_catch_exception()!
247 247 const Address thread (rbp, thread_off * wordSize);
248 248
249 249 const Address r15_save(rbp, r15_off * wordSize);
250 250 const Address r14_save(rbp, r14_off * wordSize);
251 251 const Address r13_save(rbp, r13_off * wordSize);
252 252 const Address r12_save(rbp, r12_off * wordSize);
253 253 const Address rbx_save(rbp, rbx_off * wordSize);
254 254
255 255 // stub code
256 256 __ enter();
257 257 __ subptr(rsp, -rsp_after_call_off * wordSize);
258 258
259 259 // save register parameters
260 260 #ifndef _WIN64
261 261 __ movptr(parameters, c_rarg5); // parameters
262 262 __ movptr(entry_point, c_rarg4); // entry_point
263 263 #endif
264 264
265 265 __ movptr(method, c_rarg3); // method
266 266 __ movl(result_type, c_rarg2); // result type
267 267 __ movptr(result, c_rarg1); // result
268 268 __ movptr(call_wrapper, c_rarg0); // call wrapper
269 269
270 270 // save regs belonging to calling function
271 271 __ movptr(rbx_save, rbx);
272 272 __ movptr(r12_save, r12);
273 273 __ movptr(r13_save, r13);
274 274 __ movptr(r14_save, r14);
275 275 __ movptr(r15_save, r15);
276 276 #ifdef _WIN64
277 277 for (int i = 6; i <= 15; i++) {
278 278 __ movdqu(xmm_save(i), as_XMMRegister(i));
279 279 }
280 280
281 281 const Address rdi_save(rbp, rdi_off * wordSize);
282 282 const Address rsi_save(rbp, rsi_off * wordSize);
283 283
284 284 __ movptr(rsi_save, rsi);
285 285 __ movptr(rdi_save, rdi);
286 286 #else
287 287 const Address mxcsr_save(rbp, mxcsr_off * wordSize);
288 288 {
289 289 Label skip_ldmx;
290 290 __ stmxcsr(mxcsr_save);
291 291 __ movl(rax, mxcsr_save);
292 292 __ andl(rax, MXCSR_MASK); // Only check control and mask bits
293 293 ExternalAddress mxcsr_std(StubRoutines::x86::mxcsr_std());
294 294 __ cmp32(rax, mxcsr_std);
295 295 __ jcc(Assembler::equal, skip_ldmx);
296 296 __ ldmxcsr(mxcsr_std);
297 297 __ bind(skip_ldmx);
298 298 }
299 299 #endif
300 300
301 301 // Load up thread register
302 302 __ movptr(r15_thread, thread);
303 303 __ reinit_heapbase();
304 304
305 305 #ifdef ASSERT
306 306 // make sure we have no pending exceptions
307 307 {
308 308 Label L;
309 309 __ cmpptr(Address(r15_thread, Thread::pending_exception_offset()), (int32_t)NULL_WORD);
310 310 __ jcc(Assembler::equal, L);
311 311 __ stop("StubRoutines::call_stub: entered with pending exception");
312 312 __ bind(L);
313 313 }
314 314 #endif
315 315
316 316 // pass parameters if any
317 317 BLOCK_COMMENT("pass parameters if any");
318 318 Label parameters_done;
319 319 __ movl(c_rarg3, parameter_size);
320 320 __ testl(c_rarg3, c_rarg3);
321 321 __ jcc(Assembler::zero, parameters_done);
322 322
323 323 Label loop;
324 324 __ movptr(c_rarg2, parameters); // parameter pointer
325 325 __ movl(c_rarg1, c_rarg3); // parameter counter is in c_rarg1
326 326 __ BIND(loop);
327 327 __ movptr(rax, Address(c_rarg2, 0));// get parameter
328 328 __ addptr(c_rarg2, wordSize); // advance to next parameter
329 329 __ decrementl(c_rarg1); // decrement counter
330 330 __ push(rax); // pass parameter
331 331 __ jcc(Assembler::notZero, loop);
332 332
333 333 // call Java function
334 334 __ BIND(parameters_done);
335 335 __ movptr(rbx, method); // get methodOop
336 336 __ movptr(c_rarg1, entry_point); // get entry_point
337 337 __ mov(r13, rsp); // set sender sp
338 338 BLOCK_COMMENT("call Java function");
339 339 __ call(c_rarg1);
340 340
341 341 BLOCK_COMMENT("call_stub_return_address:");
342 342 return_address = __ pc();
343 343
344 344 // store result depending on type (everything that is not
345 345 // T_OBJECT, T_LONG, T_FLOAT or T_DOUBLE is treated as T_INT)
346 346 __ movptr(c_rarg0, result);
347 347 Label is_long, is_float, is_double, exit;
348 348 __ movl(c_rarg1, result_type);
349 349 __ cmpl(c_rarg1, T_OBJECT);
350 350 __ jcc(Assembler::equal, is_long);
351 351 __ cmpl(c_rarg1, T_LONG);
352 352 __ jcc(Assembler::equal, is_long);
353 353 __ cmpl(c_rarg1, T_FLOAT);
354 354 __ jcc(Assembler::equal, is_float);
355 355 __ cmpl(c_rarg1, T_DOUBLE);
356 356 __ jcc(Assembler::equal, is_double);
357 357
358 358 // handle T_INT case
359 359 __ movl(Address(c_rarg0, 0), rax);
360 360
361 361 __ BIND(exit);
362 362
363 363 // pop parameters
364 364 __ lea(rsp, rsp_after_call);
365 365
366 366 #ifdef ASSERT
367 367 // verify that threads correspond
368 368 {
369 369 Label L, S;
370 370 __ cmpptr(r15_thread, thread);
371 371 __ jcc(Assembler::notEqual, S);
372 372 __ get_thread(rbx);
373 373 __ cmpptr(r15_thread, rbx);
374 374 __ jcc(Assembler::equal, L);
375 375 __ bind(S);
376 376 __ jcc(Assembler::equal, L);
377 377 __ stop("StubRoutines::call_stub: threads must correspond");
378 378 __ bind(L);
379 379 }
380 380 #endif
381 381
382 382 // restore regs belonging to calling function
383 383 #ifdef _WIN64
384 384 for (int i = 15; i >= 6; i--) {
385 385 __ movdqu(as_XMMRegister(i), xmm_save(i));
386 386 }
387 387 #endif
388 388 __ movptr(r15, r15_save);
389 389 __ movptr(r14, r14_save);
390 390 __ movptr(r13, r13_save);
391 391 __ movptr(r12, r12_save);
392 392 __ movptr(rbx, rbx_save);
393 393
394 394 #ifdef _WIN64
395 395 __ movptr(rdi, rdi_save);
396 396 __ movptr(rsi, rsi_save);
397 397 #else
398 398 __ ldmxcsr(mxcsr_save);
399 399 #endif
400 400
401 401 // restore rsp
402 402 __ addptr(rsp, -rsp_after_call_off * wordSize);
403 403
404 404 // return
405 405 __ pop(rbp);
406 406 __ ret(0);
407 407
408 408 // handle return types different from T_INT
409 409 __ BIND(is_long);
410 410 __ movq(Address(c_rarg0, 0), rax);
411 411 __ jmp(exit);
412 412
413 413 __ BIND(is_float);
414 414 __ movflt(Address(c_rarg0, 0), xmm0);
415 415 __ jmp(exit);
416 416
417 417 __ BIND(is_double);
418 418 __ movdbl(Address(c_rarg0, 0), xmm0);
419 419 __ jmp(exit);
420 420
421 421 return start;
422 422 }
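
The "pass parameters if any" loop above reduces to the following standalone
sketch (illustrative names, not HotSpot code); each push moves rsp down, so
parameters are copied in index order and parameters[0] ends up deepest on
the stack:

    #include <cstdint>

    // 'sp' models rsp, which grows downward with each push.
    void push_parameters(const intptr_t* parameters, int count, intptr_t*& sp) {
      for (int i = 0; i < count; i++)
        *--sp = parameters[i];   // last parameter ends up on top
    }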
423 423
424 424 // Return point for a Java call if there's an exception thrown in
425 425 // Java code. The exception is caught and transformed into a
426 426 // pending exception stored in JavaThread that can be tested from
427 427 // within the VM.
428 428 //
429 429 // Note: Usually the parameters are removed by the callee. In case
430 430 // of an exception crossing an activation frame boundary, that is
431 431 // not the case if the callee is compiled code => need to setup the
432 432 // rsp.
433 433 //
434 434 // rax: exception oop
435 435
436 436 address generate_catch_exception() {
437 437 StubCodeMark mark(this, "StubRoutines", "catch_exception");
438 438 address start = __ pc();
439 439
440 440 // same as in generate_call_stub():
441 441 const Address rsp_after_call(rbp, rsp_after_call_off * wordSize);
442 442 const Address thread (rbp, thread_off * wordSize);
443 443
444 444 #ifdef ASSERT
445 445 // verify that threads correspond
446 446 {
447 447 Label L, S;
448 448 __ cmpptr(r15_thread, thread);
449 449 __ jcc(Assembler::notEqual, S);
450 450 __ get_thread(rbx);
451 451 __ cmpptr(r15_thread, rbx);
452 452 __ jcc(Assembler::equal, L);
453 453 __ bind(S);
454 454 __ stop("StubRoutines::catch_exception: threads must correspond");
455 455 __ bind(L);
456 456 }
457 457 #endif
458 458
459 459 // set pending exception
460 460 __ verify_oop(rax);
461 461
462 462 __ movptr(Address(r15_thread, Thread::pending_exception_offset()), rax);
463 463 __ lea(rscratch1, ExternalAddress((address)__FILE__));
464 464 __ movptr(Address(r15_thread, Thread::exception_file_offset()), rscratch1);
465 465 __ movl(Address(r15_thread, Thread::exception_line_offset()), (int) __LINE__);
466 466
467 467 // complete return to VM
468 468 assert(StubRoutines::_call_stub_return_address != NULL,
469 469 "_call_stub_return_address must have been generated before");
470 470 __ jump(RuntimeAddress(StubRoutines::_call_stub_return_address));
471 471
472 472 return start;
473 473 }
474 474
475 475 // Continuation point for runtime calls returning with a pending
476 476 // exception. The pending exception check happened in the runtime
477 477 // or native call stub. The pending exception in Thread is
478 478 // converted into a Java-level exception.
479 479 //
480 480 // Contract with Java-level exception handlers:
481 481 // rax: exception
482 482 // rdx: throwing pc
483 483 //
484 484 // NOTE: At entry of this stub, exception-pc must be on stack !!
485 485
486 486 address generate_forward_exception() {
487 487 StubCodeMark mark(this, "StubRoutines", "forward exception");
488 488 address start = __ pc();
489 489
490 490 // Upon entry, the sp points to the return address returning into
491 491 // Java (interpreted or compiled) code; i.e., the return address
492 492 // becomes the throwing pc.
493 493 //
494 494 // Arguments pushed before the runtime call are still on the stack
495 495 // but the exception handler will reset the stack pointer ->
496 496 // ignore them. A potential result in registers can be ignored as
497 497 // well.
498 498
499 499 #ifdef ASSERT
500 500 // make sure this code is only executed if there is a pending exception
501 501 {
502 502 Label L;
503 503 __ cmpptr(Address(r15_thread, Thread::pending_exception_offset()), (int32_t) NULL);
504 504 __ jcc(Assembler::notEqual, L);
505 505 __ stop("StubRoutines::forward exception: no pending exception (1)");
506 506 __ bind(L);
507 507 }
508 508 #endif
509 509
510 510 // compute exception handler into rbx
511 511 __ movptr(c_rarg0, Address(rsp, 0));
512 512 BLOCK_COMMENT("call exception_handler_for_return_address");
513 513 __ call_VM_leaf(CAST_FROM_FN_PTR(address,
514 514 SharedRuntime::exception_handler_for_return_address),
515 515 r15_thread, c_rarg0);
516 516 __ mov(rbx, rax);
517 517
518 518 // setup rax & rdx, remove return address & clear pending exception
519 519 __ pop(rdx);
520 520 __ movptr(rax, Address(r15_thread, Thread::pending_exception_offset()));
521 521 __ movptr(Address(r15_thread, Thread::pending_exception_offset()), (int32_t)NULL_WORD);
522 522
523 523 #ifdef ASSERT
524 524 // make sure exception is set
525 525 {
526 526 Label L;
527 527 __ testptr(rax, rax);
528 528 __ jcc(Assembler::notEqual, L);
529 529 __ stop("StubRoutines::forward exception: no pending exception (2)");
530 530 __ bind(L);
531 531 }
532 532 #endif
533 533
534 534 // continue at exception handler (return address removed)
535 535 // rax: exception
536 536 // rbx: exception handler
537 537 // rdx: throwing pc
538 538 __ verify_oop(rax);
539 539 __ jmp(rbx);
540 540
541 541 return start;
542 542 }
543 543
544 544 // Support for jint atomic::xchg(jint exchange_value, volatile jint* dest)
545 545 //
546 546 // Arguments :
547 547 // c_rarg0: exchange_value
548 548 // c_rarg1: dest
549 549 //
550 550 // Result:
551 551 // *dest <- ex, return (orig *dest)
552 552 address generate_atomic_xchg() {
553 553 StubCodeMark mark(this, "StubRoutines", "atomic_xchg");
554 554 address start = __ pc();
555 555
556 556 __ movl(rax, c_rarg0); // Copy to eax; we need a return value anyhow
557 557 __ xchgl(rax, Address(c_rarg1, 0)); // automatic LOCK
558 558 __ ret(0);
559 559
560 560 return start;
561 561 }
562 562
563 563 // Support for intptr_t atomic::xchg_ptr(intptr_t exchange_value, volatile intptr_t* dest)
564 564 //
565 565 // Arguments :
566 566 // c_rarg0: exchange_value
567 567 // c_rarg1: dest
568 568 //
569 569 // Result:
570 570 // *dest <- ex, return (orig *dest)
571 571 address generate_atomic_xchg_ptr() {
572 572 StubCodeMark mark(this, "StubRoutines", "atomic_xchg_ptr");
573 573 address start = __ pc();
574 574
575 575 __ movptr(rax, c_rarg0); // Copy to eax; we need a return value anyhow
576 576 __ xchgptr(rax, Address(c_rarg1, 0)); // automatic LOCK
577 577 __ ret(0);
578 578
579 579 return start;
580 580 }
581 581
582 582 // Support for jint atomic::atomic_cmpxchg(jint exchange_value, volatile jint* dest,
583 583 // jint compare_value)
584 584 //
585 585 // Arguments :
586 586 // c_rarg0: exchange_value
587 587 // c_rarg1: dest
588 588 // c_rarg2: compare_value
589 589 //
590 590 // Result:
591 591 //    if ( compare_value == *dest ) {
592 592 //       *dest = exchange_value;
593 593 //       return compare_value;
594 594 //    } else
595 595 //       return *dest;
596 596 address generate_atomic_cmpxchg() {
597 597 StubCodeMark mark(this, "StubRoutines", "atomic_cmpxchg");
598 598 address start = __ pc();
599 599
600 600 __ movl(rax, c_rarg2);
601 601 if ( os::is_MP() ) __ lock();
602 602 __ cmpxchgl(c_rarg0, Address(c_rarg1, 0));
603 603 __ ret(0);
604 604
605 605 return start;
606 606 }
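
The contract above, returning the prior *dest whether or not the exchange
happened, matches what lock cmpxchg leaves in rax; a sketch in terms of
std::atomic (a standalone model, not the stub):

    #include <atomic>
    #include <cstdint>

    int32_t atomic_cmpxchg(int32_t exchange_value, std::atomic<int32_t>* dest,
                           int32_t compare_value) {
      // On failure compare_exchange_strong stores the observed value into
      // compare_value; on success compare_value already equals the old *dest.
      dest->compare_exchange_strong(compare_value, exchange_value);
      return compare_value;
    }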
607 607
608 608 // Support for jint atomic::atomic_cmpxchg_long(jlong exchange_value,
609 609 // volatile jlong* dest,
610 610 // jlong compare_value)
611 611 // Arguments :
612 612 // c_rarg0: exchange_value
613 613 // c_rarg1: dest
614 614 // c_rarg2: compare_value
615 615 //
616 616 // Result:
617 617 //    if ( compare_value == *dest ) {
618 618 //       *dest = exchange_value;
619 619 //       return compare_value;
620 620 //    } else
621 621 //       return *dest;
622 622 address generate_atomic_cmpxchg_long() {
623 623 StubCodeMark mark(this, "StubRoutines", "atomic_cmpxchg_long");
624 624 address start = __ pc();
625 625
626 626 __ movq(rax, c_rarg2);
627 627 if ( os::is_MP() ) __ lock();
628 628 __ cmpxchgq(c_rarg0, Address(c_rarg1, 0));
629 629 __ ret(0);
630 630
631 631 return start;
632 632 }
633 633
634 634 // Support for jint atomic::add(jint add_value, volatile jint* dest)
635 635 //
636 636 // Arguments :
637 637 // c_rarg0: add_value
638 638 // c_rarg1: dest
639 639 //
640 640 // Result:
641 641 // *dest += add_value
642 642 // return *dest;
643 643 address generate_atomic_add() {
644 644 StubCodeMark mark(this, "StubRoutines", "atomic_add");
645 645 address start = __ pc();
646 646
647 647 __ movl(rax, c_rarg0);
648 648 if ( os::is_MP() ) __ lock();
649 649 __ xaddl(Address(c_rarg1, 0), c_rarg0);
650 650 __ addl(rax, c_rarg0);
651 651 __ ret(0);
652 652
653 653 return start;
654 654 }
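
Equivalently in std::atomic terms: xadd leaves the old value in the source
register, so the stub adds add_value once more to return the new value
(standalone sketch, not the stub):

    #include <atomic>
    #include <cstdint>

    int32_t atomic_add(int32_t add_value, std::atomic<int32_t>* dest) {
      return dest->fetch_add(add_value) + add_value;  // old + add = new
    }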
655 655
656 656 // Support for intptr_t atomic::add_ptr(intptr_t add_value, volatile intptr_t* dest)
657 657 //
658 658 // Arguments :
659 659 // c_rarg0: add_value
660 660 // c_rarg1: dest
661 661 //
662 662 // Result:
663 663 // *dest += add_value
664 664 // return *dest;
665 665 address generate_atomic_add_ptr() {
666 666 StubCodeMark mark(this, "StubRoutines", "atomic_add_ptr");
667 667 address start = __ pc();
668 668
669 669 __ movptr(rax, c_rarg0); // Copy to eax; we need a return value anyhow
670 670 if ( os::is_MP() ) __ lock();
671 671 __ xaddptr(Address(c_rarg1, 0), c_rarg0);
672 672 __ addptr(rax, c_rarg0);
673 673 __ ret(0);
674 674
675 675 return start;
676 676 }
677 677
678 678 // Support for intptr_t OrderAccess::fence()
679 679 //
680 680 // Arguments :
681 681 //
682 682 // Result:
683 683 address generate_orderaccess_fence() {
684 684 StubCodeMark mark(this, "StubRoutines", "orderaccess_fence");
685 685 address start = __ pc();
686 686 __ membar(Assembler::StoreLoad);
687 687 __ ret(0);
688 688
689 689 return start;
690 690 }
691 691
692 692 // Support for intptr_t get_previous_fp()
693 693 //
694 694 // This routine is used to find the previous frame pointer for the
695 695 // caller (current_frame_guess). This is used as part of debugging
696 696 // when ps() is seemingly lost trying to find frames.
697 697 // This code assumes that the caller (current_frame_guess) has a frame.
698 698 address generate_get_previous_fp() {
699 699 StubCodeMark mark(this, "StubRoutines", "get_previous_fp");
700 700 const Address old_fp(rbp, 0);
701 701 const Address older_fp(rax, 0);
702 702 address start = __ pc();
703 703
704 704 __ enter();
705 705 __ movptr(rax, old_fp); // callers fp
706 706 __ movptr(rax, older_fp); // the frame for ps()
707 707 __ pop(rbp);
708 708 __ ret(0);
709 709
710 710 return start;
711 711 }
712 712
713 713 // Support for intptr_t get_previous_sp()
714 714 //
715 715 // This routine is used to find the previous stack pointer for the
716 716 // caller.
717 717 address generate_get_previous_sp() {
718 718 StubCodeMark mark(this, "StubRoutines", "get_previous_sp");
719 719 address start = __ pc();
720 720
721 721 __ movptr(rax, rsp);
722 722 __ addptr(rax, 8); // return address is at the top of the stack.
723 723 __ ret(0);
724 724
725 725 return start;
726 726 }
727 727
728 728 //----------------------------------------------------------------------------------------------------
729 729 // Support for void verify_mxcsr()
730 730 //
731 731 // This routine is used with -Xcheck:jni to verify that native
732 732 // JNI code does not return to Java code without restoring the
733 733 // MXCSR register to our expected state.
734 734
735 735 address generate_verify_mxcsr() {
736 736 StubCodeMark mark(this, "StubRoutines", "verify_mxcsr");
737 737 address start = __ pc();
738 738
739 739 const Address mxcsr_save(rsp, 0);
740 740
741 741 if (CheckJNICalls) {
742 742 Label ok_ret;
743 743 __ push(rax);
744 744 __ subptr(rsp, wordSize); // allocate a temp location
745 745 __ stmxcsr(mxcsr_save);
746 746 __ movl(rax, mxcsr_save);
747 747 __ andl(rax, MXCSR_MASK); // Only check control and mask bits
748 748 __ cmpl(rax, *(int *)(StubRoutines::x86::mxcsr_std()));
749 749 __ jcc(Assembler::equal, ok_ret);
750 750
751 751 __ warn("MXCSR changed by native JNI code, use -XX:+RestoreMXCSROnJNICall");
752 752
753 753 __ ldmxcsr(ExternalAddress(StubRoutines::x86::mxcsr_std()));
754 754
755 755 __ bind(ok_ret);
756 756 __ addptr(rsp, wordSize);
757 757 __ pop(rax);
758 758 }
759 759
760 760 __ ret(0);
761 761
762 762 return start;
763 763 }
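
The check reduces to comparing masked control bits against the saved
standard value; schematically (standalone sketch, names local to it):

    #include <cstdint>

    const uint32_t MXCSR_CTRL_MASK = 0xFFC0;  // drop the sticky flag bits

    bool mxcsr_matches_std(uint32_t current_mxcsr, uint32_t std_mxcsr) {
      return (current_mxcsr & MXCSR_CTRL_MASK) == std_mxcsr;
    }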
764 764
765 765 address generate_f2i_fixup() {
766 766 StubCodeMark mark(this, "StubRoutines", "f2i_fixup");
767 767 Address inout(rsp, 5 * wordSize); // return address + 4 saves
768 768
769 769 address start = __ pc();
770 770
771 771 Label L;
772 772
773 773 __ push(rax);
774 774 __ push(c_rarg3);
775 775 __ push(c_rarg2);
776 776 __ push(c_rarg1);
777 777
778 778 __ movl(rax, 0x7f800000);
779 779 __ xorl(c_rarg3, c_rarg3);
780 780 __ movl(c_rarg2, inout);
781 781 __ movl(c_rarg1, c_rarg2);
782 782 __ andl(c_rarg1, 0x7fffffff);
783 783 __ cmpl(rax, c_rarg1); // NaN? -> 0
784 784 __ jcc(Assembler::negative, L);
785 785 __ testl(c_rarg2, c_rarg2); // signed ? min_jint : max_jint
786 786 __ movl(c_rarg3, 0x80000000);
787 787 __ movl(rax, 0x7fffffff);
788 788 __ cmovl(Assembler::positive, c_rarg3, rax);
789 789
790 790 __ bind(L);
791 791 __ movptr(inout, c_rarg3);
792 792
793 793 __ pop(c_rarg1);
794 794 __ pop(c_rarg2);
795 795 __ pop(c_rarg3);
796 796 __ pop(rax);
797 797
798 798 __ ret(0);
799 799
800 800 return start;
801 801 }
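
The fixup implements this mapping, assuming it is reached only when the
hardware cvttss2si conversion produced the "integer indefinite" value; a
standalone C++ sketch:

    #include <cstdint>

    int32_t f2i_fixup(uint32_t float_bits) {
      if ((float_bits & 0x7fffffff) > 0x7f800000)   // fraction bits set: NaN
        return 0;
      // Out-of-range values saturate according to the sign bit.
      return (int32_t)float_bits < 0 ? INT32_MIN : INT32_MAX;
    }

The f2l, d2i and d2l fixups below follow the same pattern with the
corresponding masks and 64-bit saturation constants.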
802 802
803 803 address generate_f2l_fixup() {
804 804 StubCodeMark mark(this, "StubRoutines", "f2l_fixup");
805 805 Address inout(rsp, 5 * wordSize); // return address + 4 saves
806 806 address start = __ pc();
807 807
808 808 Label L;
809 809
810 810 __ push(rax);
811 811 __ push(c_rarg3);
812 812 __ push(c_rarg2);
813 813 __ push(c_rarg1);
814 814
815 815 __ movl(rax, 0x7f800000);
816 816 __ xorl(c_rarg3, c_rarg3);
817 817 __ movl(c_rarg2, inout);
818 818 __ movl(c_rarg1, c_rarg2);
819 819 __ andl(c_rarg1, 0x7fffffff);
820 820 __ cmpl(rax, c_rarg1); // NaN? -> 0
821 821 __ jcc(Assembler::negative, L);
822 822 __ testl(c_rarg2, c_rarg2); // signed ? min_jlong : max_jlong
823 823 __ mov64(c_rarg3, 0x8000000000000000);
824 824 __ mov64(rax, 0x7fffffffffffffff);
825 825 __ cmov(Assembler::positive, c_rarg3, rax);
826 826
827 827 __ bind(L);
828 828 __ movptr(inout, c_rarg3);
829 829
830 830 __ pop(c_rarg1);
831 831 __ pop(c_rarg2);
832 832 __ pop(c_rarg3);
833 833 __ pop(rax);
834 834
835 835 __ ret(0);
836 836
837 837 return start;
838 838 }
839 839
840 840 address generate_d2i_fixup() {
841 841 StubCodeMark mark(this, "StubRoutines", "d2i_fixup");
842 842 Address inout(rsp, 6 * wordSize); // return address + 5 saves
843 843
844 844 address start = __ pc();
845 845
846 846 Label L;
847 847
848 848 __ push(rax);
849 849 __ push(c_rarg3);
850 850 __ push(c_rarg2);
851 851 __ push(c_rarg1);
852 852 __ push(c_rarg0);
853 853
854 854 __ movl(rax, 0x7ff00000);
855 855 __ movq(c_rarg2, inout);
856 856 __ movl(c_rarg3, c_rarg2);
857 857 __ mov(c_rarg1, c_rarg2);
858 858 __ mov(c_rarg0, c_rarg2);
859 859 __ negl(c_rarg3);
860 860 __ shrptr(c_rarg1, 0x20);
861 861 __ orl(c_rarg3, c_rarg2);
862 862 __ andl(c_rarg1, 0x7fffffff);
863 863 __ xorl(c_rarg2, c_rarg2);
864 864 __ shrl(c_rarg3, 0x1f);
865 865 __ orl(c_rarg1, c_rarg3);
866 866 __ cmpl(rax, c_rarg1);
867 867 __ jcc(Assembler::negative, L); // NaN -> 0
868 868 __ testptr(c_rarg0, c_rarg0); // signed ? min_jint : max_jint
869 869 __ movl(c_rarg2, 0x80000000);
870 870 __ movl(rax, 0x7fffffff);
871 871 __ cmov(Assembler::positive, c_rarg2, rax);
872 872
873 873 __ bind(L);
874 874 __ movptr(inout, c_rarg2);
875 875
876 876 __ pop(c_rarg0);
877 877 __ pop(c_rarg1);
878 878 __ pop(c_rarg2);
879 879 __ pop(c_rarg3);
880 880 __ pop(rax);
881 881
882 882 __ ret(0);
883 883
884 884 return start;
885 885 }
886 886
887 887 address generate_d2l_fixup() {
888 888 StubCodeMark mark(this, "StubRoutines", "d2l_fixup");
889 889 Address inout(rsp, 6 * wordSize); // return address + 5 saves
890 890
891 891 address start = __ pc();
892 892
893 893 Label L;
894 894
895 895 __ push(rax);
896 896 __ push(c_rarg3);
897 897 __ push(c_rarg2);
898 898 __ push(c_rarg1);
899 899 __ push(c_rarg0);
900 900
901 901 __ movl(rax, 0x7ff00000);
902 902 __ movq(c_rarg2, inout);
903 903 __ movl(c_rarg3, c_rarg2);
904 904 __ mov(c_rarg1, c_rarg2);
905 905 __ mov(c_rarg0, c_rarg2);
906 906 __ negl(c_rarg3);
907 907 __ shrptr(c_rarg1, 0x20);
908 908 __ orl(c_rarg3, c_rarg2);
909 909 __ andl(c_rarg1, 0x7fffffff);
910 910 __ xorl(c_rarg2, c_rarg2);
911 911 __ shrl(c_rarg3, 0x1f);
912 912 __ orl(c_rarg1, c_rarg3);
913 913 __ cmpl(rax, c_rarg1);
914 914 __ jcc(Assembler::negative, L); // NaN -> 0
915 915 __ testq(c_rarg0, c_rarg0); // signed ? min_jlong : max_jlong
916 916 __ mov64(c_rarg2, 0x8000000000000000);
917 917 __ mov64(rax, 0x7fffffffffffffff);
918 918 __ cmovq(Assembler::positive, c_rarg2, rax);
919 919
920 920 __ bind(L);
921 921 __ movq(inout, c_rarg2);
922 922
923 923 __ pop(c_rarg0);
924 924 __ pop(c_rarg1);
925 925 __ pop(c_rarg2);
926 926 __ pop(c_rarg3);
927 927 __ pop(rax);
928 928
929 929 __ ret(0);
930 930
931 931 return start;
932 932 }
933 933
934 934 address generate_fp_mask(const char *stub_name, int64_t mask) {
935 935 __ align(CodeEntryAlignment);
936 936 StubCodeMark mark(this, "StubRoutines", stub_name);
937 937 address start = __ pc();
938 938
939 939 __ emit_data64( mask, relocInfo::none );
940 940 __ emit_data64( mask, relocInfo::none );
941 941
942 942 return start;
943 943 }
944 944
945 945 // The following routine generates a subroutine to throw an
946 946 // asynchronous UnknownError when an unsafe access gets a fault that
947 947 // could not be reasonably prevented by the programmer. (Example:
948 948 // SIGBUS/OBJERR.)
949 949 address generate_handler_for_unsafe_access() {
950 950 StubCodeMark mark(this, "StubRoutines", "handler_for_unsafe_access");
951 951 address start = __ pc();
952 952
953 953 __ push(0); // hole for return address-to-be
954 954 __ pusha(); // push registers
955 955 Address next_pc(rsp, RegisterImpl::number_of_registers * BytesPerWord);
956 956
957 957 // FIXME: this probably needs alignment logic
958 958
959 959 __ subptr(rsp, frame::arg_reg_save_area_bytes);
960 960 BLOCK_COMMENT("call handle_unsafe_access");
961 961 __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, handle_unsafe_access)));
962 962 __ addptr(rsp, frame::arg_reg_save_area_bytes);
963 963
964 964 __ movptr(next_pc, rax); // stuff next address
965 965 __ popa();
966 966 __ ret(0); // jump to next address
967 967
968 968 return start;
969 969 }
970 970
971 971 // Non-destructive plausibility checks for oops
972 972 //
973 973 // Arguments:
974 974 // all args on stack!
975 975 //
976 976 // Stack after saving c_rarg3:
977 977 // [tos + 0]: saved c_rarg3
978 978 // [tos + 1]: saved c_rarg2
979 979 // [tos + 2]: saved r12 (several TemplateTable methods use it)
980 980 // [tos + 3]: saved flags
981 981 // [tos + 4]: return address
982 982 // * [tos + 5]: error message (char*)
983 983 // * [tos + 6]: object to verify (oop)
984 984 // * [tos + 7]: saved rax - saved by caller and bashed
985 985 // * [tos + 8]: saved r10 (rscratch1) - saved by caller
986 986 // * = popped on exit
987 987 address generate_verify_oop() {
988 988 StubCodeMark mark(this, "StubRoutines", "verify_oop");
989 989 address start = __ pc();
990 990
991 991 Label exit, error;
992 992
993 993 __ pushf();
994 994 __ incrementl(ExternalAddress((address) StubRoutines::verify_oop_count_addr()));
995 995
996 996 __ push(r12);
997 997
998 998 // save c_rarg2 and c_rarg3
999 999 __ push(c_rarg2);
1000 1000 __ push(c_rarg3);
1001 1001
1002 1002 enum {
1003 1003 // After previous pushes.
1004 1004 oop_to_verify = 6 * wordSize,
1005 1005 saved_rax = 7 * wordSize,
1006 1006 saved_r10 = 8 * wordSize,
1007 1007
1008 1008 // Before the call to MacroAssembler::debug(), see below.
1009 1009 return_addr = 16 * wordSize,
1010 1010 error_msg = 17 * wordSize
1011 1011 };
1012 1012
1013 1013 // get object
1014 1014 __ movptr(rax, Address(rsp, oop_to_verify));
1015 1015
1016 1016 // make sure object is 'reasonable'
1017 1017 __ testptr(rax, rax);
1018 1018 __ jcc(Assembler::zero, exit); // if obj is NULL it is OK
1019 1019 // Check if the oop is in the right area of memory
1020 1020 __ movptr(c_rarg2, rax);
1021 1021 __ movptr(c_rarg3, (intptr_t) Universe::verify_oop_mask());
1022 1022 __ andptr(c_rarg2, c_rarg3);
1023 1023 __ movptr(c_rarg3, (intptr_t) Universe::verify_oop_bits());
1024 1024 __ cmpptr(c_rarg2, c_rarg3);
1025 1025 __ jcc(Assembler::notZero, error);
1026 1026
1027 1027 // set r12 to heapbase for load_klass()
1028 1028 __ reinit_heapbase();
1029 1029
1030 1030 // make sure klass is 'reasonable'
1031 1031 __ load_klass(rax, rax); // get klass
1032 1032 __ testptr(rax, rax);
1033 1033 __ jcc(Assembler::zero, error); // if klass is NULL it is broken
1034 1034 // Check if the klass is in the right area of memory
1035 1035 __ mov(c_rarg2, rax);
1036 1036 __ movptr(c_rarg3, (intptr_t) Universe::verify_klass_mask());
1037 1037 __ andptr(c_rarg2, c_rarg3);
1038 1038 __ movptr(c_rarg3, (intptr_t) Universe::verify_klass_bits());
1039 1039 __ cmpptr(c_rarg2, c_rarg3);
1040 1040 __ jcc(Assembler::notZero, error);
1041 1041
1042 1042 // make sure klass' klass is 'reasonable'
1043 1043 __ load_klass(rax, rax);
1044 1044 __ testptr(rax, rax);
1045 1045 __ jcc(Assembler::zero, error); // if klass' klass is NULL it is broken
1046 1046 // Check if the klass' klass is in the right area of memory
1047 1047 __ movptr(c_rarg3, (intptr_t) Universe::verify_klass_mask());
1048 1048 __ andptr(rax, c_rarg3);
1049 1049 __ movptr(c_rarg3, (intptr_t) Universe::verify_klass_bits());
1050 1050 __ cmpptr(rax, c_rarg3);
1051 1051 __ jcc(Assembler::notZero, error);
1052 1052
1053 1053 // return if everything seems ok
1054 1054 __ bind(exit);
1055 1055 __ movptr(rax, Address(rsp, saved_rax)); // get saved rax back
1056 1056 __ movptr(rscratch1, Address(rsp, saved_r10)); // get saved r10 back
1057 1057 __ pop(c_rarg3); // restore c_rarg3
1058 1058 __ pop(c_rarg2); // restore c_rarg2
1059 1059 __ pop(r12); // restore r12
1060 1060 __ popf(); // restore flags
1061 1061 __ ret(4 * wordSize); // pop caller saved stuff
1062 1062
1063 1063 // handle errors
1064 1064 __ bind(error);
1065 1065 __ movptr(rax, Address(rsp, saved_rax)); // get saved rax back
1066 1066 __ movptr(rscratch1, Address(rsp, saved_r10)); // get saved r10 back
1067 1067 __ pop(c_rarg3); // get saved c_rarg3 back
1068 1068 __ pop(c_rarg2); // get saved c_rarg2 back
1069 1069 __ pop(r12); // get saved r12 back
1070 1070 __ popf(); // get saved flags off stack --
1071 1071 // will be ignored
1072 1072
1073 1073 __ pusha(); // push registers
1074 1074 // (rip is
1075 1075 // already pushed)
1076 1076 // debug(char* msg, int64_t pc, int64_t regs[])
1077 1077 // We've popped the registers we'd saved (c_rarg3, c_rarg2 and flags), and
1078 1078 // pushed all the registers, so now the stack looks like:
1079 1079 // [tos + 0] 16 saved registers
1080 1080 // [tos + 16] return address
1081 1081 // * [tos + 17] error message (char*)
1082 1082 // * [tos + 18] object to verify (oop)
1083 1083 // * [tos + 19] saved rax - saved by caller and bashed
1084 1084 // * [tos + 20] saved r10 (rscratch1) - saved by caller
1085 1085 // * = popped on exit
1086 1086
1087 1087 __ movptr(c_rarg0, Address(rsp, error_msg)); // pass address of error message
1088 1088 __ movptr(c_rarg1, Address(rsp, return_addr)); // pass return address
1089 1089 __ movq(c_rarg2, rsp); // pass address of regs on stack
1090 1090 __ mov(r12, rsp); // remember rsp
1091 1091 __ subptr(rsp, frame::arg_reg_save_area_bytes); // windows
1092 1092 __ andptr(rsp, -16); // align stack as required by ABI
1093 1093 BLOCK_COMMENT("call MacroAssembler::debug");
1094 1094 __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, MacroAssembler::debug64)));
1095 1095 __ mov(rsp, r12); // restore rsp
1096 1096 __ popa(); // pop registers (includes r12)
1097 1097 __ ret(4 * wordSize); // pop caller saved stuff
1098 1098
1099 1099 return start;
1100 1100 }
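
All three "reasonable" checks above instantiate one mask-and-compare
predicate; schematically (standalone sketch):

    #include <cstdint>

    // A pointer is plausible when its masked bits match the expected
    // pattern for the area (Universe::verify_oop_* or verify_klass_*).
    bool looks_reasonable(uintptr_t p, uintptr_t verify_mask, uintptr_t verify_bits) {
      return (p & verify_mask) == verify_bits;
    }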
1101 1101
1102 1102 //
1103 1103 // Verify that a register contains a clean 32-bit positive value
1104 1104 // (high 32 bits are 0) so it can be used in 64-bit shifts.
1105 1105 //
1106 1106 // Input:
1107 1107 // Rint - 32-bits value
1108 1108 // Rtmp - scratch
1109 1109 //
1110 1110 void assert_clean_int(Register Rint, Register Rtmp) {
1111 1111 #ifdef ASSERT
1112 1112 Label L;
1113 1113 assert_different_registers(Rtmp, Rint);
1114 1114 __ movslq(Rtmp, Rint);
1115 1115 __ cmpq(Rtmp, Rint);
1116 1116 __ jcc(Assembler::equal, L);
1117 1117 __ stop("high 32-bits of int value are not 0");
1118 1118 __ bind(L);
1119 1119 #endif
1120 1120 }
1121 1121
1122 1122 // Generate overlap test for array copy stubs
1123 1123 //
1124 1124 // Input:
1125 1125 // c_rarg0 - from
1126 1126 // c_rarg1 - to
1127 1127 // c_rarg2 - element count
1128 1128 //
1129 1129 // Output:
1130 1130 // rax - &from[element count] (first address past the source data)
1131 1131 //
1132 1132 void array_overlap_test(address no_overlap_target, Address::ScaleFactor sf) {
1133 1133 assert(no_overlap_target != NULL, "must be generated");
1134 1134 array_overlap_test(no_overlap_target, NULL, sf);
1135 1135 }
1136 1136 void array_overlap_test(Label& L_no_overlap, Address::ScaleFactor sf) {
1137 1137 array_overlap_test(NULL, &L_no_overlap, sf);
1138 1138 }
1139 1139 void array_overlap_test(address no_overlap_target, Label* NOLp, Address::ScaleFactor sf) {
1140 1140 const Register from = c_rarg0;
1141 1141 const Register to = c_rarg1;
1142 1142 const Register count = c_rarg2;
1143 1143 const Register end_from = rax;
1144 1144
1145 1145 __ cmpptr(to, from);
1146 1146 __ lea(end_from, Address(from, count, sf, 0));
1147 1147 if (NOLp == NULL) {
1148 1148 ExternalAddress no_overlap(no_overlap_target);
1149 1149 __ jump_cc(Assembler::belowEqual, no_overlap);
1150 1150 __ cmpptr(to, end_from);
1151 1151 __ jump_cc(Assembler::aboveEqual, no_overlap);
1152 1152 } else {
1153 1153 __ jcc(Assembler::belowEqual, (*NOLp));
1154 1154 __ cmpptr(to, end_from);
1155 1155 __ jcc(Assembler::aboveEqual, (*NOLp));
1156 1156 }
1157 1157 }
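
The two comparisons encode the standard disjointness condition for a
forward (low-to-high) copy; as a standalone predicate (illustrative, with
the scale factor expanded to a byte size):

    #include <cstddef>
    #include <cstdint>

    // A forward copy is safe when the destination does not start inside
    // the source range [from, from + count*elem_size).
    bool no_overlap_forward(uintptr_t from, uintptr_t to,
                            size_t count, size_t elem_size) {
      return to <= from || to >= from + count * elem_size;
    }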
1158 1158
1159 1159 // Shuffle first three arg regs on Windows into Linux/Solaris locations.
1160 1160 //
1161 1161 // Outputs:
1162 1162 // rdi - rcx
1163 1163 // rsi - rdx
1164 1164 // rdx - r8
1165 1165 // rcx - r9
1166 1166 //
1167 1167 // Registers r9 and r10 are used on Windows to save rdi and rsi, which
1168 1168 // are non-volatile there. r9 and r10 should not be used by the caller.
1169 1169 //
1170 1170 void setup_arg_regs(int nargs = 3) {
1171 1171 const Register saved_rdi = r9;
1172 1172 const Register saved_rsi = r10;
1173 1173 assert(nargs == 3 || nargs == 4, "else fix");
1174 1174 #ifdef _WIN64
1175 1175 assert(c_rarg0 == rcx && c_rarg1 == rdx && c_rarg2 == r8 && c_rarg3 == r9,
1176 1176 "unexpected argument registers");
1177 1177 if (nargs >= 4)
1178 1178 __ mov(rax, r9); // r9 is also saved_rdi
1179 1179 __ movptr(saved_rdi, rdi);
1180 1180 __ movptr(saved_rsi, rsi);
1181 1181 __ mov(rdi, rcx); // c_rarg0
1182 1182 __ mov(rsi, rdx); // c_rarg1
1183 1183 __ mov(rdx, r8); // c_rarg2
1184 1184 if (nargs >= 4)
1185 1185 __ mov(rcx, rax); // c_rarg3 (via rax)
1186 1186 #else
1187 1187 assert(c_rarg0 == rdi && c_rarg1 == rsi && c_rarg2 == rdx && c_rarg3 == rcx,
1188 1188 "unexpected argument registers");
1189 1189 #endif
1190 1190 }
1191 1191
1192 1192 void restore_arg_regs() {
1193 1193 const Register saved_rdi = r9;
1194 1194 const Register saved_rsi = r10;
1195 1195 #ifdef _WIN64
1196 1196 __ movptr(rdi, saved_rdi);
1197 1197 __ movptr(rsi, saved_rsi);
1198 1198 #endif
1199 1199 }
1200 1200
1201 1201 // Generate code for an array write pre barrier
1202 1202 //
1203 1203 // addr - starting address
1204 1204 // count - element count
1205 1205 // tmp - scratch register
1206 1206 //
1207 1207 // Destroy no registers!
1208 1208 //
1209 1209 void gen_write_ref_array_pre_barrier(Register addr, Register count, bool dest_uninitialized) {
1210 1210 BarrierSet* bs = Universe::heap()->barrier_set();
1211 1211 switch (bs->kind()) {
1212 1212 case BarrierSet::G1SATBCT:
1213 1213 case BarrierSet::G1SATBCTLogging:
1214 1214 // With G1, don't generate the call if we statically know that the target is uninitialized
1215 1215 if (!dest_uninitialized) {
1216 1216 __ pusha(); // push registers
1217 1217 if (count == c_rarg0) {
1218 1218 if (addr == c_rarg1) {
1219 1219 // exactly backwards!!
1220 1220 __ xchgptr(c_rarg1, c_rarg0);
1221 1221 } else {
1222 1222 __ movptr(c_rarg1, count);
1223 1223 __ movptr(c_rarg0, addr);
1224 1224 }
1225 1225 } else {
1226 1226 __ movptr(c_rarg0, addr);
1227 1227 __ movptr(c_rarg1, count);
1228 1228 }
1229 1229 __ call_VM_leaf(CAST_FROM_FN_PTR(address, BarrierSet::static_write_ref_array_pre), 2);
1230 1230 __ popa();
1231 1231 }
1232 1232 break;
1233 1233 case BarrierSet::CardTableModRef:
1234 1234 case BarrierSet::CardTableExtension:
1235 1235 case BarrierSet::ModRef:
1236 1236 break;
1237 1237 default:
1238 1238 ShouldNotReachHere();
1239 1239
1240 1240 }
1241 1241 }
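
The if/else ladder above solves a small parallel-move problem: get
(addr, count) into (c_rarg0, c_rarg1) without clobbering either source
before it is read. The same logic in standalone form (references stand in
for registers; names are illustrative):

    #include <utility>

    void marshal_addr_count(long& arg0, long& arg1, long& addr, long& count) {
      if (&count == &arg0) {
        if (&addr == &arg1) {
          std::swap(arg0, arg1);   // "exactly backwards": one swap fixes both
        } else {
          arg1 = count;            // read count out of arg0 first
          arg0 = addr;
        }
      } else {
        arg0 = addr;               // count is not in arg0, so this order is safe
        arg1 = count;
      }
    }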
1242 1242
1243 1243 //
1244 1244 // Generate code for an array write post barrier
1245 1245 //
1246 1246 // Input:
1247 1247 // start - register containing starting address of destination array
1248 - // end - register containing ending address of destination array
1248 + // count - elements count
1249 1249 // scratch - scratch register
1250 1250 //
1251 1251 // The input registers are overwritten.
1252 - // The ending address is inclusive.
1253 - void gen_write_ref_array_post_barrier(Register start, Register end, Register scratch) {
1254 - assert_different_registers(start, end, scratch);
1252 + //
1253 + void gen_write_ref_array_post_barrier(Register start, Register count, Register scratch) {
1254 + assert_different_registers(start, count, scratch);
1255 1255 BarrierSet* bs = Universe::heap()->barrier_set();
1256 1256 switch (bs->kind()) {
1257 1257 case BarrierSet::G1SATBCT:
1258 1258 case BarrierSet::G1SATBCTLogging:
1259 -
1260 1259 {
1261 - __ pusha(); // push registers (overkill)
1262 - // must compute element count unless barrier set interface is changed (other platforms supply count)
1263 - assert_different_registers(start, end, scratch);
1264 - __ lea(scratch, Address(end, BytesPerHeapOop));
1265 - __ subptr(scratch, start); // subtract start to get #bytes
1266 - __ shrptr(scratch, LogBytesPerHeapOop); // convert to element count
1267 - __ mov(c_rarg0, start);
1268 - __ mov(c_rarg1, scratch);
1260 + __ pusha(); // push registers (overkill)
1261 + if (c_rarg0 == count) { // On win64 c_rarg0 == rcx
1262 + assert_different_registers(c_rarg1, start);
1263 + __ mov(c_rarg1, count);
1264 + __ mov(c_rarg0, start);
1265 + } else {
1266 + assert_different_registers(c_rarg0, count);
1267 + __ mov(c_rarg0, start);
1268 + __ mov(c_rarg1, count);
1269 + }
1269 1270 __ call_VM_leaf(CAST_FROM_FN_PTR(address, BarrierSet::static_write_ref_array_post), 2);
1270 1271 __ popa();
1271 1272 }
1272 1273 break;
1273 1274 case BarrierSet::CardTableModRef:
1274 1275 case BarrierSet::CardTableExtension:
1275 1276 {
1276 1277 CardTableModRefBS* ct = (CardTableModRefBS*)bs;
1277 1278 assert(sizeof(*ct->byte_map_base) == sizeof(jbyte), "adjust this code");
1278 1279
1279 1280 Label L_loop;
1281 + const Register end = count;
1280 1282
1281 - __ shrptr(start, CardTableModRefBS::card_shift);
1282 - __ addptr(end, BytesPerHeapOop);
1283 - __ shrptr(end, CardTableModRefBS::card_shift);
1284 - __ subptr(end, start); // number of bytes to copy
1283 + __ leaq(end, Address(start, count, TIMES_OOP, 0)); // end == start+count*oop_size
1284 + __ subptr(end, BytesPerHeapOop); // end - 1 to make inclusive
1285 + __ shrptr(start, CardTableModRefBS::card_shift);
1286 + __ shrptr(end, CardTableModRefBS::card_shift);
1287 + __ subptr(end, start); // end --> cards count
1285 1288
1286 - intptr_t disp = (intptr_t) ct->byte_map_base;
1287 - if (Assembler::is_simm32(disp)) {
1288 - Address cardtable(noreg, noreg, Address::no_scale, disp);
1289 - __ lea(scratch, cardtable);
1290 - } else {
1291 - ExternalAddress cardtable((address)disp);
1292 - __ lea(scratch, cardtable);
1293 - }
1294 -
1295 - const Register count = end; // 'end' register contains bytes count now
1289 + int64_t disp = (int64_t) ct->byte_map_base;
1290 + __ mov64(scratch, disp);
1296 1291 __ addptr(start, scratch);
1297 1292 __ BIND(L_loop);
1298 1293 __ movb(Address(start, count, Address::times_1), 0);
1299 1294 __ decrement(count);
1300 1295 __ jcc(Assembler::greaterEqual, L_loop);
1301 1296 }
1302 1297 break;
1303 1298 default:
1304 1299 ShouldNotReachHere();
1305 1300
1306 1301 }
1307 1302 }
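
The new card-marking sequence is equivalent to the following standalone
sketch; card_shift, oop_size and the biased byte_map_base are parameters
here, standing in for the CardTableModRefBS fields, and count > 0 is a
precondition (generate_checkcast_copy() now skips the barrier otherwise):

    #include <cstddef>
    #include <cstdint>

    void dirty_cards(uintptr_t start, size_t count, uint8_t* byte_map_base,
                     int card_shift, size_t oop_size) {
      uintptr_t last = start + count * oop_size - oop_size;  // inclusive end
      uintptr_t first_card = start >> card_shift;
      intptr_t  cards = (intptr_t)((last >> card_shift) - first_card);
      for (intptr_t i = cards; i >= 0; i--)    // cards+1 cards spanned
        byte_map_base[first_card + i] = 0;     // 0 marks the card dirty
    }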
1308 1303
1309 1304
1310 1305 // Copy big chunks forward
1311 1306 //
1312 1307 // Inputs:
1313 1308 // end_from - source array's end address
1314 1309 // end_to - destination array's end address
1315 1310 // qword_count - 64-bit element count, negative
1316 1311 // to - scratch
1317 1312 // L_copy_bytes - entry label
1318 1313 // L_copy_8_bytes - exit label
1319 1314 //
1320 1315 void copy_bytes_forward(Register end_from, Register end_to,
1321 1316 Register qword_count, Register to,
1322 1317 Label& L_copy_bytes, Label& L_copy_8_bytes) {
1323 1318 DEBUG_ONLY(__ stop("enter at entry label, not here"));
1324 1319 Label L_loop;
1325 1320 __ align(OptoLoopAlignment);
1326 1321 if (UseUnalignedLoadStores) {
1327 1322 Label L_end;
1328 1323 // Copy 64 bytes per iteration
1329 1324 __ BIND(L_loop);
1330 1325 if (UseAVX >= 2) {
1331 1326 __ vmovdqu(xmm0, Address(end_from, qword_count, Address::times_8, -56));
1332 1327 __ vmovdqu(Address(end_to, qword_count, Address::times_8, -56), xmm0);
1333 1328 __ vmovdqu(xmm1, Address(end_from, qword_count, Address::times_8, -24));
1334 1329 __ vmovdqu(Address(end_to, qword_count, Address::times_8, -24), xmm1);
1335 1330 } else {
1336 1331 __ movdqu(xmm0, Address(end_from, qword_count, Address::times_8, -56));
1337 1332 __ movdqu(Address(end_to, qword_count, Address::times_8, -56), xmm0);
1338 1333 __ movdqu(xmm1, Address(end_from, qword_count, Address::times_8, -40));
1339 1334 __ movdqu(Address(end_to, qword_count, Address::times_8, -40), xmm1);
1340 1335 __ movdqu(xmm2, Address(end_from, qword_count, Address::times_8, -24));
1341 1336 __ movdqu(Address(end_to, qword_count, Address::times_8, -24), xmm2);
1342 1337 __ movdqu(xmm3, Address(end_from, qword_count, Address::times_8, - 8));
1343 1338 __ movdqu(Address(end_to, qword_count, Address::times_8, - 8), xmm3);
1344 1339 }
1345 1340 __ BIND(L_copy_bytes);
1346 1341 __ addptr(qword_count, 8);
1347 1342 __ jcc(Assembler::lessEqual, L_loop);
1348 1343 __ subptr(qword_count, 4); // sub(8) and add(4)
1349 1344 __ jccb(Assembler::greater, L_end);
1350 1345 // Copy trailing 32 bytes
1351 1346 if (UseAVX >= 2) {
1352 1347 __ vmovdqu(xmm0, Address(end_from, qword_count, Address::times_8, -24));
1353 1348 __ vmovdqu(Address(end_to, qword_count, Address::times_8, -24), xmm0);
1354 1349 } else {
1355 1350 __ movdqu(xmm0, Address(end_from, qword_count, Address::times_8, -24));
1356 1351 __ movdqu(Address(end_to, qword_count, Address::times_8, -24), xmm0);
1357 1352 __ movdqu(xmm1, Address(end_from, qword_count, Address::times_8, - 8));
1358 1353 __ movdqu(Address(end_to, qword_count, Address::times_8, - 8), xmm1);
1359 1354 }
1360 1355 __ addptr(qword_count, 4);
1361 1356 __ BIND(L_end);
1362 1357 if (UseAVX >= 2) {
1363 1358 // clean upper bits of YMM registers
1364 1359 __ vzeroupper();
1365 1360 }
1366 1361 } else {
1367 1362 // Copy 32 bytes per iteration
1368 1363 __ BIND(L_loop);
1369 1364 __ movq(to, Address(end_from, qword_count, Address::times_8, -24));
1370 1365 __ movq(Address(end_to, qword_count, Address::times_8, -24), to);
1371 1366 __ movq(to, Address(end_from, qword_count, Address::times_8, -16));
1372 1367 __ movq(Address(end_to, qword_count, Address::times_8, -16), to);
1373 1368 __ movq(to, Address(end_from, qword_count, Address::times_8, - 8));
1374 1369 __ movq(Address(end_to, qword_count, Address::times_8, - 8), to);
1375 1370 __ movq(to, Address(end_from, qword_count, Address::times_8, - 0));
1376 1371 __ movq(Address(end_to, qword_count, Address::times_8, - 0), to);
1377 1372
1378 1373 __ BIND(L_copy_bytes);
1379 1374 __ addptr(qword_count, 4);
1380 1375 __ jcc(Assembler::lessEqual, L_loop);
1381 1376 }
1382 1377 __ subptr(qword_count, 4);
1383 1378 __ jcc(Assembler::less, L_copy_8_bytes); // Copy trailing qwords
1384 1379 }
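
The loop structure relies on end-biased pointers and a negative index that
counts up toward zero, so the index update doubles as the loop test; the
idiom distilled into standalone C++ (illustrative, qwords only):

    #include <cstddef>
    #include <cstdint>

    void copy_qwords_forward(const uint64_t* from, uint64_t* to, ptrdiff_t n) {
      const uint64_t* end_from = from + n;   // one past the last qword
      uint64_t*       end_to   = to + n;
      for (ptrdiff_t i = -n; i != 0; i++)    // i runs -n .. -1
        end_to[i] = end_from[i];
    }

copy_bytes_backward below mirrors this with a positive count stepping down
toward zero.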
1385 1380
1386 1381 // Copy big chunks backward
1387 1382 //
1388 1383 // Inputs:
1389 1384 // from - source array's address
1390 1385 // dest - destination array's address
1391 1386 // qword_count - 64-bit element count
1392 1387 // to - scratch
1393 1388 // L_copy_bytes - entry label
1394 1389 // L_copy_8_bytes - exit label
1395 1390 //
1396 1391 void copy_bytes_backward(Register from, Register dest,
1397 1392 Register qword_count, Register to,
1398 1393 Label& L_copy_bytes, Label& L_copy_8_bytes) {
1399 1394 DEBUG_ONLY(__ stop("enter at entry label, not here"));
1400 1395 Label L_loop;
1401 1396 __ align(OptoLoopAlignment);
1402 1397 if (UseUnalignedLoadStores) {
1403 1398 Label L_end;
1404 1399 // Copy 64 bytes per iteration
1405 1400 __ BIND(L_loop);
1406 1401 if (UseAVX >= 2) {
1407 1402 __ vmovdqu(xmm0, Address(from, qword_count, Address::times_8, 32));
1408 1403 __ vmovdqu(Address(dest, qword_count, Address::times_8, 32), xmm0);
1409 1404 __ vmovdqu(xmm1, Address(from, qword_count, Address::times_8, 0));
1410 1405 __ vmovdqu(Address(dest, qword_count, Address::times_8, 0), xmm1);
1411 1406 } else {
1412 1407 __ movdqu(xmm0, Address(from, qword_count, Address::times_8, 48));
1413 1408 __ movdqu(Address(dest, qword_count, Address::times_8, 48), xmm0);
1414 1409 __ movdqu(xmm1, Address(from, qword_count, Address::times_8, 32));
1415 1410 __ movdqu(Address(dest, qword_count, Address::times_8, 32), xmm1);
1416 1411 __ movdqu(xmm2, Address(from, qword_count, Address::times_8, 16));
1417 1412 __ movdqu(Address(dest, qword_count, Address::times_8, 16), xmm2);
1418 1413 __ movdqu(xmm3, Address(from, qword_count, Address::times_8, 0));
1419 1414 __ movdqu(Address(dest, qword_count, Address::times_8, 0), xmm3);
1420 1415 }
1421 1416 __ BIND(L_copy_bytes);
1422 1417 __ subptr(qword_count, 8);
1423 1418 __ jcc(Assembler::greaterEqual, L_loop);
1424 1419
1425 1420 __ addptr(qword_count, 4); // add(8) and sub(4)
1426 1421 __ jccb(Assembler::less, L_end);
1427 1422 // Copy trailing 32 bytes
1428 1423 if (UseAVX >= 2) {
1429 1424 __ vmovdqu(xmm0, Address(from, qword_count, Address::times_8, 0));
1430 1425 __ vmovdqu(Address(dest, qword_count, Address::times_8, 0), xmm0);
1431 1426 } else {
1432 1427 __ movdqu(xmm0, Address(from, qword_count, Address::times_8, 16));
1433 1428 __ movdqu(Address(dest, qword_count, Address::times_8, 16), xmm0);
1434 1429 __ movdqu(xmm1, Address(from, qword_count, Address::times_8, 0));
1435 1430 __ movdqu(Address(dest, qword_count, Address::times_8, 0), xmm1);
1436 1431 }
1437 1432 __ subptr(qword_count, 4);
1438 1433 __ BIND(L_end);
1439 1434 if (UseAVX >= 2) {
1440 1435 // clean upper bits of YMM registers
1441 1436 __ vzeroupper();
1442 1437 }
1443 1438 } else {
1444 1439 // Copy 32 bytes per iteration
1445 1440 __ BIND(L_loop);
1446 1441 __ movq(to, Address(from, qword_count, Address::times_8, 24));
1447 1442 __ movq(Address(dest, qword_count, Address::times_8, 24), to);
1448 1443 __ movq(to, Address(from, qword_count, Address::times_8, 16));
1449 1444 __ movq(Address(dest, qword_count, Address::times_8, 16), to);
1450 1445 __ movq(to, Address(from, qword_count, Address::times_8, 8));
1451 1446 __ movq(Address(dest, qword_count, Address::times_8, 8), to);
1452 1447 __ movq(to, Address(from, qword_count, Address::times_8, 0));
1453 1448 __ movq(Address(dest, qword_count, Address::times_8, 0), to);
1454 1449
1455 1450 __ BIND(L_copy_bytes);
1456 1451 __ subptr(qword_count, 4);
1457 1452 __ jcc(Assembler::greaterEqual, L_loop);
1458 1453 }
1459 1454 __ addptr(qword_count, 4);
1460 1455 __ jcc(Assembler::greater, L_copy_8_bytes); // Copy trailing qwords
1461 1456 }
1462 1457
1463 1458
1464 1459 // Arguments:
1465 1460 // aligned - true => Input and output aligned on a HeapWord == 8-byte boundary
1466 1461 // ignored
1467 1462 // name - stub name string
1468 1463 //
1469 1464 // Inputs:
1470 1465 // c_rarg0 - source array address
1471 1466 // c_rarg1 - destination array address
1472 1467 // c_rarg2 - element count, treated as ssize_t, can be zero
1473 1468 //
1474 1469 // If 'from' and/or 'to' are aligned on 4-, 2-, or 1-byte boundaries,
1475 1470 // we let the hardware handle it. The one to eight bytes within words,
1476 1471 // dwords or qwords that span cache line boundaries will still be loaded
1477 1472 // and stored atomically.
1478 1473 //
1479 1474 // Side Effects:
1480 1475 // disjoint_byte_copy_entry is set to the no-overlap entry point
1481 1476 // used by generate_conjoint_byte_copy().
1482 1477 //
1483 1478 address generate_disjoint_byte_copy(bool aligned, address* entry, const char *name) {
1484 1479 __ align(CodeEntryAlignment);
1485 1480 StubCodeMark mark(this, "StubRoutines", name);
1486 1481 address start = __ pc();
1487 1482
1488 1483 Label L_copy_bytes, L_copy_8_bytes, L_copy_4_bytes, L_copy_2_bytes;
1489 1484 Label L_copy_byte, L_exit;
1490 1485 const Register from = rdi; // source array address
1491 1486 const Register to = rsi; // destination array address
1492 1487 const Register count = rdx; // elements count
1493 1488 const Register byte_count = rcx;
1494 1489 const Register qword_count = count;
1495 1490 const Register end_from = from; // source array end address
1496 1491 const Register end_to = to; // destination array end address
1497 1492 // End pointers are inclusive, and if count is not zero they point
1498 1493 // to the last unit copied: end_to[0] := end_from[0]
1499 1494
1500 1495 __ enter(); // required for proper stackwalking of RuntimeStub frame
1501 1496 assert_clean_int(c_rarg2, rax); // Make sure 'count' is clean int.
1502 1497
1503 1498 if (entry != NULL) {
1504 1499 *entry = __ pc();
1505 1500 // caller can pass a 64-bit byte count here (from Unsafe.copyMemory)
1506 1501 BLOCK_COMMENT("Entry:");
1507 1502 }
1508 1503
1509 1504 setup_arg_regs(); // from => rdi, to => rsi, count => rdx
1510 1505 // r9 and r10 may be used to save non-volatile registers
1511 1506
1512 1507 // 'from', 'to' and 'count' are now valid
1513 1508 __ movptr(byte_count, count);
1514 1509 __ shrptr(count, 3); // count => qword_count
1515 1510
1516 1511 // Copy from low to high addresses. Use 'to' as scratch.
1517 1512 __ lea(end_from, Address(from, qword_count, Address::times_8, -8));
1518 1513 __ lea(end_to, Address(to, qword_count, Address::times_8, -8));
1519 1514 __ negptr(qword_count); // make the count negative
1520 1515 __ jmp(L_copy_bytes);
1521 1516
1522 1517 // Copy trailing qwords
1523 1518 __ BIND(L_copy_8_bytes);
1524 1519 __ movq(rax, Address(end_from, qword_count, Address::times_8, 8));
1525 1520 __ movq(Address(end_to, qword_count, Address::times_8, 8), rax);
1526 1521 __ increment(qword_count);
1527 1522 __ jcc(Assembler::notZero, L_copy_8_bytes);
1528 1523
1529 1524 // Check for and copy trailing dword
1530 1525 __ BIND(L_copy_4_bytes);
1531 1526 __ testl(byte_count, 4);
1532 1527 __ jccb(Assembler::zero, L_copy_2_bytes);
1533 1528 __ movl(rax, Address(end_from, 8));
1534 1529 __ movl(Address(end_to, 8), rax);
1535 1530
1536 1531 __ addptr(end_from, 4);
1537 1532 __ addptr(end_to, 4);
1538 1533
1539 1534 // Check for and copy trailing word
1540 1535 __ BIND(L_copy_2_bytes);
1541 1536 __ testl(byte_count, 2);
1542 1537 __ jccb(Assembler::zero, L_copy_byte);
1543 1538 __ movw(rax, Address(end_from, 8));
1544 1539 __ movw(Address(end_to, 8), rax);
1545 1540
1546 1541 __ addptr(end_from, 2);
1547 1542 __ addptr(end_to, 2);
1548 1543
1549 1544 // Check for and copy trailing byte
1550 1545 __ BIND(L_copy_byte);
1551 1546 __ testl(byte_count, 1);
1552 1547 __ jccb(Assembler::zero, L_exit);
1553 1548 __ movb(rax, Address(end_from, 8));
1554 1549 __ movb(Address(end_to, 8), rax);
1555 1550
1556 1551 __ BIND(L_exit);
1557 1552 restore_arg_regs();
1558 1553 inc_counter_np(SharedRuntime::_jbyte_array_copy_ctr); // Update counter after rscratch1 is free
1559 1554 __ xorptr(rax, rax); // return 0
1560 1555 __ leave(); // required for proper stackwalking of RuntimeStub frame
1561 1556 __ ret(0);
1562 1557
1563 1558     // Copy in multi-byte chunks
1564 1559 copy_bytes_forward(end_from, end_to, qword_count, rax, L_copy_bytes, L_copy_8_bytes);
1565 1560 __ jmp(L_copy_4_bytes);
1566 1561
1567 1562 return start;
1568 1563 }
1569 1564
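
// The stub's overall strategy, as a hedged C++ sketch (the function
// name is illustrative): bulk qword copies first, then bits 2, 1 and 0
// of the original byte count select the trailing dword, word and byte
// moves, mirroring L_copy_4_bytes / L_copy_2_bytes / L_copy_byte.
#include <string.h>
#include <stdint.h>
#include <stddef.h>

static void disjoint_byte_copy_sketch(const uint8_t* from, uint8_t* to,
                                      size_t byte_count) {
  size_t qwords = byte_count >> 3;                 // count => qword_count
  memcpy(to, from, qwords * 8);                    // L_loop / L_copy_8_bytes
  const uint8_t* f = from + qwords * 8;
  uint8_t* t = to + qwords * 8;
  if (byte_count & 4) { memcpy(t, f, 4); f += 4; t += 4; }  // trailing dword
  if (byte_count & 2) { memcpy(t, f, 2); f += 2; t += 2; }  // trailing word
  if (byte_count & 1) { *t = *f; }                          // trailing byte
}
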
1570 1565 // Arguments:
1571 1566 // aligned - true => Input and output aligned on a HeapWord == 8-byte boundary
1572 1567 // ignored
1573 1568 // name - stub name string
1574 1569 //
1575 1570 // Inputs:
1576 1571 // c_rarg0 - source array address
1577 1572 // c_rarg1 - destination array address
1578 1573 // c_rarg2 - element count, treated as ssize_t, can be zero
1579 1574 //
1580 1575 // If 'from' and/or 'to' are aligned on 4-, 2-, or 1-byte boundaries,
1581 1576 // we let the hardware handle it. The one to eight bytes within words,
1582 1577 // dwords or qwords that span cache line boundaries will still be loaded
1583 1578 // and stored atomically.
1584 1579 //
1585 1580 address generate_conjoint_byte_copy(bool aligned, address nooverlap_target,
1586 1581 address* entry, const char *name) {
1587 1582 __ align(CodeEntryAlignment);
1588 1583 StubCodeMark mark(this, "StubRoutines", name);
1589 1584 address start = __ pc();
1590 1585
1591 1586 Label L_copy_bytes, L_copy_8_bytes, L_copy_4_bytes, L_copy_2_bytes;
1592 1587 const Register from = rdi; // source array address
1593 1588 const Register to = rsi; // destination array address
1594 1589 const Register count = rdx; // elements count
1595 1590 const Register byte_count = rcx;
1596 1591 const Register qword_count = count;
1597 1592
1598 1593 __ enter(); // required for proper stackwalking of RuntimeStub frame
1599 1594 assert_clean_int(c_rarg2, rax); // Make sure 'count' is clean int.
1600 1595
1601 1596 if (entry != NULL) {
1602 1597 *entry = __ pc();
1603 1598 // caller can pass a 64-bit byte count here (from Unsafe.copyMemory)
1604 1599 BLOCK_COMMENT("Entry:");
1605 1600 }
1606 1601
1607 1602 array_overlap_test(nooverlap_target, Address::times_1);
1608 1603 setup_arg_regs(); // from => rdi, to => rsi, count => rdx
1609 1604 // r9 and r10 may be used to save non-volatile registers
1610 1605
1611 1606 // 'from', 'to' and 'count' are now valid
1612 1607 __ movptr(byte_count, count);
1613 1608 __ shrptr(count, 3); // count => qword_count
1614 1609
1615 1610 // Copy from high to low addresses.
1616 1611
1617 1612 // Check for and copy trailing byte
1618 1613 __ testl(byte_count, 1);
1619 1614 __ jcc(Assembler::zero, L_copy_2_bytes);
1620 1615 __ movb(rax, Address(from, byte_count, Address::times_1, -1));
1621 1616 __ movb(Address(to, byte_count, Address::times_1, -1), rax);
1622 1617 __ decrement(byte_count); // Adjust for possible trailing word
1623 1618
1624 1619 // Check for and copy trailing word
1625 1620 __ BIND(L_copy_2_bytes);
1626 1621 __ testl(byte_count, 2);
1627 1622 __ jcc(Assembler::zero, L_copy_4_bytes);
1628 1623 __ movw(rax, Address(from, byte_count, Address::times_1, -2));
1629 1624 __ movw(Address(to, byte_count, Address::times_1, -2), rax);
1630 1625
1631 1626 // Check for and copy trailing dword
1632 1627 __ BIND(L_copy_4_bytes);
1633 1628 __ testl(byte_count, 4);
1634 1629 __ jcc(Assembler::zero, L_copy_bytes);
1635 1630 __ movl(rax, Address(from, qword_count, Address::times_8));
1636 1631 __ movl(Address(to, qword_count, Address::times_8), rax);
1637 1632 __ jmp(L_copy_bytes);
1638 1633
1639 1634 // Copy trailing qwords
1640 1635 __ BIND(L_copy_8_bytes);
1641 1636 __ movq(rax, Address(from, qword_count, Address::times_8, -8));
1642 1637 __ movq(Address(to, qword_count, Address::times_8, -8), rax);
1643 1638 __ decrement(qword_count);
1644 1639 __ jcc(Assembler::notZero, L_copy_8_bytes);
1645 1640
1646 1641 restore_arg_regs();
1647 1642 inc_counter_np(SharedRuntime::_jbyte_array_copy_ctr); // Update counter after rscratch1 is free
1648 1643 __ xorptr(rax, rax); // return 0
1649 1644 __ leave(); // required for proper stackwalking of RuntimeStub frame
1650 1645 __ ret(0);
1651 1646
1652 1647     // Copy in multi-byte chunks
1653 1648 copy_bytes_backward(from, to, qword_count, rax, L_copy_bytes, L_copy_8_bytes);
1654 1649
1655 1650 restore_arg_regs();
1656 1651 inc_counter_np(SharedRuntime::_jbyte_array_copy_ctr); // Update counter after rscratch1 is free
1657 1652 __ xorptr(rax, rax); // return 0
1658 1653 __ leave(); // required for proper stackwalking of RuntimeStub frame
1659 1654 __ ret(0);
1660 1655
1661 1656 return start;
1662 1657 }
1663 1658
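
// Sketch of the backward (conjoint) order used above, with an
// illustrative name: the 1-7 trailing bytes are copied first from the
// high end, then whole qwords from high to low, so an overlapping
// destination above the source never clobbers unread source bytes.
// (When the destination is below the source, array_overlap_test has
// already dispatched to the forward, disjoint stub.) Each move loads
// into a temporary before storing, as the stub does via rax.
#include <string.h>
#include <stdint.h>
#include <stddef.h>

static void conjoint_byte_copy_sketch(const uint8_t* from, uint8_t* to,
                                      size_t byte_count) {
  size_t qwords = byte_count >> 3;
  size_t bc = byte_count;
  uint16_t w; uint32_t d; uint64_t q;
  if (bc & 1) { to[bc - 1] = from[bc - 1]; bc--; }  // trailing byte
  if (bc & 2) {                                     // trailing word
    memcpy(&w, from + bc - 2, 2); memcpy(to + bc - 2, &w, 2);
  }
  if (byte_count & 4) {                             // trailing dword
    memcpy(&d, from + qwords * 8, 4); memcpy(to + qwords * 8, &d, 4);
  }
  for (size_t i = qwords; i > 0; i--) {             // qwords, high to low
    memcpy(&q, from + (i - 1) * 8, 8);
    memcpy(to + (i - 1) * 8, &q, 8);
  }
}
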
1664 1659 // Arguments:
1665 1660 // aligned - true => Input and output aligned on a HeapWord == 8-byte boundary
1666 1661 // ignored
1667 1662 // name - stub name string
1668 1663 //
1669 1664 // Inputs:
1670 1665 // c_rarg0 - source array address
1671 1666 // c_rarg1 - destination array address
1672 1667 // c_rarg2 - element count, treated as ssize_t, can be zero
1673 1668 //
1674 1669 // If 'from' and/or 'to' are aligned on 4- or 2-byte boundaries, we
1675 1670 // let the hardware handle it. The two or four words within dwords
1676 1671 // or qwords that span cache line boundaries will still be loaded
1677 1672 // and stored atomically.
1678 1673 //
1679 1674 // Side Effects:
1680 1675 // disjoint_short_copy_entry is set to the no-overlap entry point
1681 1676 // used by generate_conjoint_short_copy().
1682 1677 //
1683 1678 address generate_disjoint_short_copy(bool aligned, address *entry, const char *name) {
1684 1679 __ align(CodeEntryAlignment);
1685 1680 StubCodeMark mark(this, "StubRoutines", name);
1686 1681 address start = __ pc();
1687 1682
1688 1683 Label L_copy_bytes, L_copy_8_bytes, L_copy_4_bytes,L_copy_2_bytes,L_exit;
1689 1684 const Register from = rdi; // source array address
1690 1685 const Register to = rsi; // destination array address
1691 1686 const Register count = rdx; // elements count
1692 1687 const Register word_count = rcx;
1693 1688 const Register qword_count = count;
1694 1689 const Register end_from = from; // source array end address
1695 1690 const Register end_to = to; // destination array end address
1696 1691 // End pointers are inclusive, and if count is not zero they point
1697 1692 // to the last unit copied: end_to[0] := end_from[0]
1698 1693
1699 1694 __ enter(); // required for proper stackwalking of RuntimeStub frame
1700 1695 assert_clean_int(c_rarg2, rax); // Make sure 'count' is clean int.
1701 1696
1702 1697 if (entry != NULL) {
1703 1698 *entry = __ pc();
1704 1699 // caller can pass a 64-bit byte count here (from Unsafe.copyMemory)
1705 1700 BLOCK_COMMENT("Entry:");
1706 1701 }
1707 1702
1708 1703 setup_arg_regs(); // from => rdi, to => rsi, count => rdx
1709 1704 // r9 and r10 may be used to save non-volatile registers
1710 1705
1711 1706 // 'from', 'to' and 'count' are now valid
1712 1707 __ movptr(word_count, count);
1713 1708 __ shrptr(count, 2); // count => qword_count
1714 1709
1715 1710 // Copy from low to high addresses. Use 'to' as scratch.
1716 1711 __ lea(end_from, Address(from, qword_count, Address::times_8, -8));
1717 1712 __ lea(end_to, Address(to, qword_count, Address::times_8, -8));
1718 1713 __ negptr(qword_count);
1719 1714 __ jmp(L_copy_bytes);
1720 1715
1721 1716 // Copy trailing qwords
1722 1717 __ BIND(L_copy_8_bytes);
1723 1718 __ movq(rax, Address(end_from, qword_count, Address::times_8, 8));
1724 1719 __ movq(Address(end_to, qword_count, Address::times_8, 8), rax);
1725 1720 __ increment(qword_count);
1726 1721 __ jcc(Assembler::notZero, L_copy_8_bytes);
1727 1722
1728 1723 // Original 'dest' is trashed, so we can't use it as a
1729 1724 // base register for a possible trailing word copy
1730 1725
1731 1726 // Check for and copy trailing dword
1732 1727 __ BIND(L_copy_4_bytes);
1733 1728 __ testl(word_count, 2);
1734 1729 __ jccb(Assembler::zero, L_copy_2_bytes);
1735 1730 __ movl(rax, Address(end_from, 8));
1736 1731 __ movl(Address(end_to, 8), rax);
1737 1732
1738 1733 __ addptr(end_from, 4);
1739 1734 __ addptr(end_to, 4);
1740 1735
1741 1736 // Check for and copy trailing word
1742 1737 __ BIND(L_copy_2_bytes);
1743 1738 __ testl(word_count, 1);
1744 1739 __ jccb(Assembler::zero, L_exit);
1745 1740 __ movw(rax, Address(end_from, 8));
1746 1741 __ movw(Address(end_to, 8), rax);
1747 1742
1748 1743 __ BIND(L_exit);
1749 1744 restore_arg_regs();
1750 1745 inc_counter_np(SharedRuntime::_jshort_array_copy_ctr); // Update counter after rscratch1 is free
1751 1746 __ xorptr(rax, rax); // return 0
1752 1747 __ leave(); // required for proper stackwalking of RuntimeStub frame
1753 1748 __ ret(0);
1754 1749
1755 1750     // Copy in multi-byte chunks
1756 1751 copy_bytes_forward(end_from, end_to, qword_count, rax, L_copy_bytes, L_copy_8_bytes);
1757 1752 __ jmp(L_copy_4_bytes);
1758 1753
1759 1754 return start;
1760 1755 }
1761 1756
1762 1757 address generate_fill(BasicType t, bool aligned, const char *name) {
1763 1758 __ align(CodeEntryAlignment);
1764 1759 StubCodeMark mark(this, "StubRoutines", name);
1765 1760 address start = __ pc();
1766 1761
1767 1762 BLOCK_COMMENT("Entry:");
1768 1763
1769 1764 const Register to = c_rarg0; // source array address
1770 1765 const Register value = c_rarg1; // value
1771 1766 const Register count = c_rarg2; // elements count
1772 1767
1773 1768 __ enter(); // required for proper stackwalking of RuntimeStub frame
1774 1769
1775 1770 __ generate_fill(t, aligned, to, value, count, rax, xmm0);
1776 1771
1777 1772 __ leave(); // required for proper stackwalking of RuntimeStub frame
1778 1773 __ ret(0);
1779 1774 return start;
1780 1775 }
1781 1776
1782 1777 // Arguments:
1783 1778 // aligned - true => Input and output aligned on a HeapWord == 8-byte boundary
1784 1779 // ignored
1785 1780 // name - stub name string
1786 1781 //
1787 1782 // Inputs:
1788 1783 // c_rarg0 - source array address
1789 1784 // c_rarg1 - destination array address
1790 1785 // c_rarg2 - element count, treated as ssize_t, can be zero
1791 1786 //
1792 1787 // If 'from' and/or 'to' are aligned on 4- or 2-byte boundaries, we
1793 1788 // let the hardware handle it. The two or four words within dwords
1794 1789 // or qwords that span cache line boundaries will still be loaded
1795 1790 // and stored atomically.
1796 1791 //
1797 1792 address generate_conjoint_short_copy(bool aligned, address nooverlap_target,
1798 1793 address *entry, const char *name) {
1799 1794 __ align(CodeEntryAlignment);
1800 1795 StubCodeMark mark(this, "StubRoutines", name);
1801 1796 address start = __ pc();
1802 1797
1803 1798 Label L_copy_bytes, L_copy_8_bytes, L_copy_4_bytes;
1804 1799 const Register from = rdi; // source array address
1805 1800 const Register to = rsi; // destination array address
1806 1801 const Register count = rdx; // elements count
1807 1802 const Register word_count = rcx;
1808 1803 const Register qword_count = count;
1809 1804
1810 1805 __ enter(); // required for proper stackwalking of RuntimeStub frame
1811 1806 assert_clean_int(c_rarg2, rax); // Make sure 'count' is clean int.
1812 1807
1813 1808 if (entry != NULL) {
1814 1809 *entry = __ pc();
1815 1810 // caller can pass a 64-bit byte count here (from Unsafe.copyMemory)
1816 1811 BLOCK_COMMENT("Entry:");
1817 1812 }
1818 1813
1819 1814 array_overlap_test(nooverlap_target, Address::times_2);
1820 1815 setup_arg_regs(); // from => rdi, to => rsi, count => rdx
1821 1816 // r9 and r10 may be used to save non-volatile registers
1822 1817
1823 1818 // 'from', 'to' and 'count' are now valid
1824 1819 __ movptr(word_count, count);
1825 1820 __ shrptr(count, 2); // count => qword_count
1826 1821
1827 1822 // Copy from high to low addresses. Use 'to' as scratch.
1828 1823
1829 1824 // Check for and copy trailing word
1830 1825 __ testl(word_count, 1);
1831 1826 __ jccb(Assembler::zero, L_copy_4_bytes);
1832 1827 __ movw(rax, Address(from, word_count, Address::times_2, -2));
1833 1828 __ movw(Address(to, word_count, Address::times_2, -2), rax);
1834 1829
1835 1830 // Check for and copy trailing dword
1836 1831 __ BIND(L_copy_4_bytes);
1837 1832 __ testl(word_count, 2);
1838 1833 __ jcc(Assembler::zero, L_copy_bytes);
1839 1834 __ movl(rax, Address(from, qword_count, Address::times_8));
1840 1835 __ movl(Address(to, qword_count, Address::times_8), rax);
1841 1836 __ jmp(L_copy_bytes);
1842 1837
1843 1838 // Copy trailing qwords
1844 1839 __ BIND(L_copy_8_bytes);
1845 1840 __ movq(rax, Address(from, qword_count, Address::times_8, -8));
1846 1841 __ movq(Address(to, qword_count, Address::times_8, -8), rax);
1847 1842 __ decrement(qword_count);
1848 1843 __ jcc(Assembler::notZero, L_copy_8_bytes);
1849 1844
1850 1845 restore_arg_regs();
1851 1846 inc_counter_np(SharedRuntime::_jshort_array_copy_ctr); // Update counter after rscratch1 is free
1852 1847 __ xorptr(rax, rax); // return 0
1853 1848 __ leave(); // required for proper stackwalking of RuntimeStub frame
1854 1849 __ ret(0);
1855 1850
1856 1851     // Copy in multi-byte chunks
1857 1852 copy_bytes_backward(from, to, qword_count, rax, L_copy_bytes, L_copy_8_bytes);
1858 1853
1859 1854 restore_arg_regs();
1860 1855 inc_counter_np(SharedRuntime::_jshort_array_copy_ctr); // Update counter after rscratch1 is free
1861 1856 __ xorptr(rax, rax); // return 0
1862 1857 __ leave(); // required for proper stackwalking of RuntimeStub frame
1863 1858 __ ret(0);
1864 1859
1865 1860 return start;
1866 1861 }
1867 1862
1868 1863 // Arguments:
1869 1864 // aligned - true => Input and output aligned on a HeapWord == 8-byte boundary
1870 1865 // ignored
1871 1866 // is_oop - true => oop array, so generate store check code
1872 1867 // name - stub name string
1873 1868 //
1874 1869 // Inputs:
1875 1870 // c_rarg0 - source array address
1876 1871 // c_rarg1 - destination array address
1877 1872 // c_rarg2 - element count, treated as ssize_t, can be zero
1878 1873 //
1879 1874 // If 'from' and/or 'to' are aligned on 4-byte boundaries, we let
1880 1875 // the hardware handle it. The two dwords within qwords that span
1881 1876 // cache line boundaries will still be loaded and stored atomically.
1882 1877 //
1883 1878 // Side Effects:
1884 1879 // disjoint_int_copy_entry is set to the no-overlap entry point
1885 1880 // used by generate_conjoint_int_oop_copy().
1886 1881 //
1887 1882 address generate_disjoint_int_oop_copy(bool aligned, bool is_oop, address* entry,
1888 1883 const char *name, bool dest_uninitialized = false) {
1889 1884 __ align(CodeEntryAlignment);
1890 1885 StubCodeMark mark(this, "StubRoutines", name);
1891 1886 address start = __ pc();
1892 1887
1893 1888 Label L_copy_bytes, L_copy_8_bytes, L_copy_4_bytes, L_exit;
1894 1889 const Register from = rdi; // source array address
1895 1890 const Register to = rsi; // destination array address
1896 1891 const Register count = rdx; // elements count
1897 1892 const Register dword_count = rcx;
1898 1893 const Register qword_count = count;
1899 1894 const Register end_from = from; // source array end address
1900 1895 const Register end_to = to; // destination array end address
1901 1896 const Register saved_to = r11; // saved destination array address
1902 1897 // End pointers are inclusive, and if count is not zero they point
1903 1898 // to the last unit copied: end_to[0] := end_from[0]
1904 1899
1905 1900 __ enter(); // required for proper stackwalking of RuntimeStub frame
1906 1901 assert_clean_int(c_rarg2, rax); // Make sure 'count' is clean int.
1907 1902
1908 1903 if (entry != NULL) {
1909 1904 *entry = __ pc();
1910 1905 // caller can pass a 64-bit byte count here (from Unsafe.copyMemory)
1911 1906 BLOCK_COMMENT("Entry:");
1912 1907 }
1913 1908
1914 1909 setup_arg_regs(); // from => rdi, to => rsi, count => rdx
1915 1910 // r9 and r10 may be used to save non-volatile registers
1916 1911 if (is_oop) {
1917 1912 __ movq(saved_to, to);
1918 1913 gen_write_ref_array_pre_barrier(to, count, dest_uninitialized);
1919 1914 }
1920 1915
1921 1916 // 'from', 'to' and 'count' are now valid
1922 1917 __ movptr(dword_count, count);
1923 1918 __ shrptr(count, 1); // count => qword_count
1924 1919
1925 1920 // Copy from low to high addresses. Use 'to' as scratch.
1926 1921 __ lea(end_from, Address(from, qword_count, Address::times_8, -8));
1927 1922 __ lea(end_to, Address(to, qword_count, Address::times_8, -8));
1928 1923 __ negptr(qword_count);
1929 1924 __ jmp(L_copy_bytes);
1930 1925
1931 1926 // Copy trailing qwords
1932 1927 __ BIND(L_copy_8_bytes);
1933 1928 __ movq(rax, Address(end_from, qword_count, Address::times_8, 8));
1934 1929 __ movq(Address(end_to, qword_count, Address::times_8, 8), rax);
1935 1930 __ increment(qword_count);
1936 1931 __ jcc(Assembler::notZero, L_copy_8_bytes);
1937 1932
1938 1933 // Check for and copy trailing dword
1939 1934 __ BIND(L_copy_4_bytes);
1940 1935 __ testl(dword_count, 1); // Only byte test since the value is 0 or 1
1941 1936 __ jccb(Assembler::zero, L_exit);
1942 1937 __ movl(rax, Address(end_from, 8));
1943 1938 __ movl(Address(end_to, 8), rax);
1944 1939
1945 1940 __ BIND(L_exit);
1946 1941 if (is_oop) {
1947 - __ leaq(end_to, Address(saved_to, dword_count, Address::times_4, -4));
1948 - gen_write_ref_array_post_barrier(saved_to, end_to, rax);
1942 + gen_write_ref_array_post_barrier(saved_to, dword_count, rax);
1949 1943 }
1950 1944 restore_arg_regs();
1951 1945 inc_counter_np(SharedRuntime::_jint_array_copy_ctr); // Update counter after rscratch1 is free
1952 1946 __ xorptr(rax, rax); // return 0
1953 1947 __ leave(); // required for proper stackwalking of RuntimeStub frame
1954 1948 __ ret(0);
1955 1949
1956 1950     // Copy in multi-byte chunks
1957 1951 copy_bytes_forward(end_from, end_to, qword_count, rax, L_copy_bytes, L_copy_8_bytes);
1958 1952 __ jmp(L_copy_4_bytes);
1959 1953
1960 1954 return start;
1961 1955 }
1962 1956
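
// Why the new (start, count) interface matters, as a hedged sketch of
// a card-table post barrier. CARD_SHIFT, card_table_base and
// dirty_card_val are illustrative stand-ins for the collector's real
// parameters. With the old inclusive-end-pointer interface, a
// zero-length copy computed an address before the array; with a count,
// the empty case is an explicit early exit.
#include <stdint.h>
#include <stddef.h>

static const int CARD_SHIFT = 9;        // 512-byte cards, a typical value
static uint8_t*  card_table_base;       // assumed to point at the biased card table
static const uint8_t dirty_card_val = 0;

static void post_barrier_sketch(void** start, size_t count) {
  if (count == 0) return;               // nothing copied, no cards to dirty
  uintptr_t first = (uintptr_t)start;
  uintptr_t last  = (uintptr_t)(start + count) - 1;  // inclusive end, count > 0
  for (uintptr_t c = (first >> CARD_SHIFT); c <= (last >> CARD_SHIFT); c++) {
    card_table_base[c] = dirty_card_val;
  }
}
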
1963 1957 // Arguments:
1964 1958 // aligned - true => Input and output aligned on a HeapWord == 8-byte boundary
1965 1959 // ignored
1966 1960 // is_oop - true => oop array, so generate store check code
1967 1961 // name - stub name string
1968 1962 //
1969 1963 // Inputs:
1970 1964 // c_rarg0 - source array address
1971 1965 // c_rarg1 - destination array address
1972 1966 // c_rarg2 - element count, treated as ssize_t, can be zero
1973 1967 //
1974 1968 // If 'from' and/or 'to' are aligned on 4-byte boundaries, we let
1975 1969 // the hardware handle it. The two dwords within qwords that span
1976 1970 // cache line boundaries will still be loaded and stored atomically.
1977 1971 //
1978 1972 address generate_conjoint_int_oop_copy(bool aligned, bool is_oop, address nooverlap_target,
1979 1973 address *entry, const char *name,
1980 1974 bool dest_uninitialized = false) {
1981 1975 __ align(CodeEntryAlignment);
1982 1976 StubCodeMark mark(this, "StubRoutines", name);
1983 1977 address start = __ pc();
1984 1978
1985 1979 Label L_copy_bytes, L_copy_8_bytes, L_copy_2_bytes, L_exit;
1986 1980 const Register from = rdi; // source array address
1987 1981 const Register to = rsi; // destination array address
1988 1982 const Register count = rdx; // elements count
1989 1983 const Register dword_count = rcx;
1990 1984 const Register qword_count = count;
1991 1985
1992 1986 __ enter(); // required for proper stackwalking of RuntimeStub frame
1993 1987 assert_clean_int(c_rarg2, rax); // Make sure 'count' is clean int.
1994 1988
1995 1989 if (entry != NULL) {
1996 1990 *entry = __ pc();
1997 1991 // caller can pass a 64-bit byte count here (from Unsafe.copyMemory)
1998 1992 BLOCK_COMMENT("Entry:");
1999 1993 }
2000 1994
2001 1995 array_overlap_test(nooverlap_target, Address::times_4);
2002 1996 setup_arg_regs(); // from => rdi, to => rsi, count => rdx
2003 1997 // r9 and r10 may be used to save non-volatile registers
2004 1998
2005 1999 if (is_oop) {
2006 2000 // no registers are destroyed by this call
2007 2001 gen_write_ref_array_pre_barrier(to, count, dest_uninitialized);
2008 2002 }
2009 2003
2010 2004 assert_clean_int(count, rax); // Make sure 'count' is clean int.
2011 2005 // 'from', 'to' and 'count' are now valid
2012 2006 __ movptr(dword_count, count);
2013 2007 __ shrptr(count, 1); // count => qword_count
2014 2008
2015 2009 // Copy from high to low addresses. Use 'to' as scratch.
2016 2010
2017 2011 // Check for and copy trailing dword
2018 2012 __ testl(dword_count, 1);
2019 2013 __ jcc(Assembler::zero, L_copy_bytes);
2020 2014 __ movl(rax, Address(from, dword_count, Address::times_4, -4));
2021 2015 __ movl(Address(to, dword_count, Address::times_4, -4), rax);
2022 2016 __ jmp(L_copy_bytes);
2023 2017
2024 2018 // Copy trailing qwords
2025 2019 __ BIND(L_copy_8_bytes);
2026 2020 __ movq(rax, Address(from, qword_count, Address::times_8, -8));
2027 2021 __ movq(Address(to, qword_count, Address::times_8, -8), rax);
2028 2022 __ decrement(qword_count);
2029 2023 __ jcc(Assembler::notZero, L_copy_8_bytes);
2030 2024
2031 2025 if (is_oop) {
2032 2026 __ jmp(L_exit);
2033 2027 }
2034 2028 restore_arg_regs();
2035 2029 inc_counter_np(SharedRuntime::_jint_array_copy_ctr); // Update counter after rscratch1 is free
2036 2030 __ xorptr(rax, rax); // return 0
2037 2031 __ leave(); // required for proper stackwalking of RuntimeStub frame
2038 2032 __ ret(0);
2039 2033
2040 2034     // Copy in multi-byte chunks
2041 2035 copy_bytes_backward(from, to, qword_count, rax, L_copy_bytes, L_copy_8_bytes);
2042 2036
2043 - __ bind(L_exit);
2044 - if (is_oop) {
2045 - Register end_to = rdx;
2046 - __ leaq(end_to, Address(to, dword_count, Address::times_4, -4));
2047 - gen_write_ref_array_post_barrier(to, end_to, rax);
2048 - }
2037 + __ BIND(L_exit);
2038 + if (is_oop) {
2039 + gen_write_ref_array_post_barrier(to, dword_count, rax);
2040 + }
2049 2041 restore_arg_regs();
2050 2042 inc_counter_np(SharedRuntime::_jint_array_copy_ctr); // Update counter after rscratch1 is free
2051 2043 __ xorptr(rax, rax); // return 0
2052 2044 __ leave(); // required for proper stackwalking of RuntimeStub frame
2053 2045 __ ret(0);
2054 2046
2055 2047 return start;
2056 2048 }
2057 2049
2058 2050 // Arguments:
2059 2051 // aligned - true => Input and output aligned on a HeapWord boundary == 8 bytes
2060 2052 // ignored
2061 2053 // is_oop - true => oop array, so generate store check code
2062 2054 // name - stub name string
2063 2055 //
2064 2056 // Inputs:
2065 2057 // c_rarg0 - source array address
2066 2058 // c_rarg1 - destination array address
2067 2059 // c_rarg2 - element count, treated as ssize_t, can be zero
2068 2060 //
2069 2061 // Side Effects:
2070 2062 // disjoint_oop_copy_entry or disjoint_long_copy_entry is set to the
2071 2063 // no-overlap entry point used by generate_conjoint_long_oop_copy().
2072 2064 //
2073 2065 address generate_disjoint_long_oop_copy(bool aligned, bool is_oop, address *entry,
2074 2066 const char *name, bool dest_uninitialized = false) {
2075 2067 __ align(CodeEntryAlignment);
2076 2068 StubCodeMark mark(this, "StubRoutines", name);
2077 2069 address start = __ pc();
2078 2070
2079 2071 Label L_copy_bytes, L_copy_8_bytes, L_exit;
2080 2072 const Register from = rdi; // source array address
2081 2073 const Register to = rsi; // destination array address
2082 2074 const Register qword_count = rdx; // elements count
2083 2075 const Register end_from = from; // source array end address
2084 2076 const Register end_to = rcx; // destination array end address
2085 2077 const Register saved_to = to;
2078 + const Register saved_count = r11;
2086 2079 // End pointers are inclusive, and if count is not zero they point
2087 2080 // to the last unit copied: end_to[0] := end_from[0]
2088 2081
2089 2082 __ enter(); // required for proper stackwalking of RuntimeStub frame
2090 2083 // Save no-overlap entry point for generate_conjoint_long_oop_copy()
2091 2084 assert_clean_int(c_rarg2, rax); // Make sure 'count' is clean int.
2092 2085
2093 2086 if (entry != NULL) {
2094 2087 *entry = __ pc();
2095 2088 // caller can pass a 64-bit byte count here (from Unsafe.copyMemory)
2096 2089 BLOCK_COMMENT("Entry:");
2097 2090 }
2098 2091
2099 2092 setup_arg_regs(); // from => rdi, to => rsi, count => rdx
2100 2093 // r9 and r10 may be used to save non-volatile registers
2101 2094 // 'from', 'to' and 'qword_count' are now valid
2102 2095 if (is_oop) {
2096 + // Save to and count for store barrier
2097 + __ movptr(saved_count, qword_count);
2103 2098 // no registers are destroyed by this call
2104 2099 gen_write_ref_array_pre_barrier(to, qword_count, dest_uninitialized);
2105 2100 }
2106 2101
2107 2102 // Copy from low to high addresses. Use 'to' as scratch.
2108 2103 __ lea(end_from, Address(from, qword_count, Address::times_8, -8));
2109 2104 __ lea(end_to, Address(to, qword_count, Address::times_8, -8));
2110 2105 __ negptr(qword_count);
2111 2106 __ jmp(L_copy_bytes);
2112 2107
2113 2108 // Copy trailing qwords
2114 2109 __ BIND(L_copy_8_bytes);
2115 2110 __ movq(rax, Address(end_from, qword_count, Address::times_8, 8));
2116 2111 __ movq(Address(end_to, qword_count, Address::times_8, 8), rax);
2117 2112 __ increment(qword_count);
2118 2113 __ jcc(Assembler::notZero, L_copy_8_bytes);
2119 2114
2120 2115 if (is_oop) {
2121 2116 __ jmp(L_exit);
2122 2117 } else {
2123 2118 restore_arg_regs();
2124 2119 inc_counter_np(SharedRuntime::_jlong_array_copy_ctr); // Update counter after rscratch1 is free
2125 2120 __ xorptr(rax, rax); // return 0
2126 2121 __ leave(); // required for proper stackwalking of RuntimeStub frame
2127 2122 __ ret(0);
2128 2123 }
2129 2124
2130 2125     // Copy in multi-byte chunks
2131 2126 copy_bytes_forward(end_from, end_to, qword_count, rax, L_copy_bytes, L_copy_8_bytes);
2132 2127
2133 2128 if (is_oop) {
2134 2129 __ BIND(L_exit);
2135 - gen_write_ref_array_post_barrier(saved_to, end_to, rax);
2130 + gen_write_ref_array_post_barrier(saved_to, saved_count, rax);
2136 2131 }
2137 2132 restore_arg_regs();
2138 2133 if (is_oop) {
2139 2134 inc_counter_np(SharedRuntime::_oop_array_copy_ctr); // Update counter after rscratch1 is free
2140 2135 } else {
2141 2136 inc_counter_np(SharedRuntime::_jlong_array_copy_ctr); // Update counter after rscratch1 is free
2142 2137 }
2143 2138 __ xorptr(rax, rax); // return 0
2144 2139 __ leave(); // required for proper stackwalking of RuntimeStub frame
2145 2140 __ ret(0);
2146 2141
2147 2142 return start;
2148 2143 }
2149 2144
2150 2145 // Arguments:
2151 2146 // aligned - true => Input and output aligned on a HeapWord boundary == 8 bytes
2152 2147 // ignored
2153 2148 // is_oop - true => oop array, so generate store check code
2154 2149 // name - stub name string
2155 2150 //
2156 2151 // Inputs:
2157 2152 // c_rarg0 - source array address
2158 2153 // c_rarg1 - destination array address
2159 2154 // c_rarg2 - element count, treated as ssize_t, can be zero
2160 2155 //
2161 2156 address generate_conjoint_long_oop_copy(bool aligned, bool is_oop,
2162 2157 address nooverlap_target, address *entry,
2163 2158 const char *name, bool dest_uninitialized = false) {
2164 2159 __ align(CodeEntryAlignment);
2165 2160 StubCodeMark mark(this, "StubRoutines", name);
2166 2161 address start = __ pc();
2167 2162
2168 2163 Label L_copy_bytes, L_copy_8_bytes, L_exit;
2169 2164 const Register from = rdi; // source array address
2170 2165 const Register to = rsi; // destination array address
2171 2166 const Register qword_count = rdx; // elements count
2172 2167 const Register saved_count = rcx;
2173 2168
2174 2169 __ enter(); // required for proper stackwalking of RuntimeStub frame
2175 2170 assert_clean_int(c_rarg2, rax); // Make sure 'count' is clean int.
2176 2171
2177 2172 if (entry != NULL) {
2178 2173 *entry = __ pc();
2179 2174 // caller can pass a 64-bit byte count here (from Unsafe.copyMemory)
2180 2175 BLOCK_COMMENT("Entry:");
2181 2176 }
2182 2177
2183 2178 array_overlap_test(nooverlap_target, Address::times_8);
2184 2179 setup_arg_regs(); // from => rdi, to => rsi, count => rdx
2185 2180 // r9 and r10 may be used to save non-volatile registers
2186 2181 // 'from', 'to' and 'qword_count' are now valid
2187 2182 if (is_oop) {
2188 2183 // Save to and count for store barrier
2189 2184 __ movptr(saved_count, qword_count);
2190 2185 // No registers are destroyed by this call
2191 2186 gen_write_ref_array_pre_barrier(to, saved_count, dest_uninitialized);
2192 2187 }
2193 2188
2194 2189 __ jmp(L_copy_bytes);
2195 2190
2196 2191 // Copy trailing qwords
2197 2192 __ BIND(L_copy_8_bytes);
2198 2193 __ movq(rax, Address(from, qword_count, Address::times_8, -8));
2199 2194 __ movq(Address(to, qword_count, Address::times_8, -8), rax);
2200 2195 __ decrement(qword_count);
2201 2196 __ jcc(Assembler::notZero, L_copy_8_bytes);
2202 2197
2203 2198 if (is_oop) {
2204 2199 __ jmp(L_exit);
2205 2200 } else {
2206 2201 restore_arg_regs();
2207 2202 inc_counter_np(SharedRuntime::_jlong_array_copy_ctr); // Update counter after rscratch1 is free
2208 2203 __ xorptr(rax, rax); // return 0
2209 2204 __ leave(); // required for proper stackwalking of RuntimeStub frame
2210 2205 __ ret(0);
2211 2206 }
2212 2207
2213 2208     // Copy in multi-byte chunks
2214 2209 copy_bytes_backward(from, to, qword_count, rax, L_copy_bytes, L_copy_8_bytes);
2215 2210
2216 2211 if (is_oop) {
2217 2212 __ BIND(L_exit);
2218 - __ lea(rcx, Address(to, saved_count, Address::times_8, -8));
2219 - gen_write_ref_array_post_barrier(to, rcx, rax);
2213 + gen_write_ref_array_post_barrier(to, saved_count, rax);
2220 2214 }
2221 2215 restore_arg_regs();
2222 2216 if (is_oop) {
2223 2217 inc_counter_np(SharedRuntime::_oop_array_copy_ctr); // Update counter after rscratch1 is free
2224 2218 } else {
2225 2219 inc_counter_np(SharedRuntime::_jlong_array_copy_ctr); // Update counter after rscratch1 is free
2226 2220 }
2227 2221 __ xorptr(rax, rax); // return 0
2228 2222 __ leave(); // required for proper stackwalking of RuntimeStub frame
2229 2223 __ ret(0);
2230 2224
2231 2225 return start;
2232 2226 }
2233 2227
2234 2228
2235 2229 // Helper for generating a dynamic type check.
2236 2230 // Smashes no registers.
2237 2231 void generate_type_check(Register sub_klass,
2238 2232 Register super_check_offset,
2239 2233 Register super_klass,
2240 2234 Label& L_success) {
2241 2235 assert_different_registers(sub_klass, super_check_offset, super_klass);
2242 2236
2243 2237 BLOCK_COMMENT("type_check:");
2244 2238
2245 2239 Label L_miss;
2246 2240
2247 2241 __ check_klass_subtype_fast_path(sub_klass, super_klass, noreg, &L_success, &L_miss, NULL,
2248 2242 super_check_offset);
2249 2243 __ check_klass_subtype_slow_path(sub_klass, super_klass, noreg, noreg, &L_success, NULL);
2250 2244
2251 2245 // Fall through on failure!
2252 2246 __ BIND(L_miss);
2253 2247 }
2254 2248
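
// A rough, illustrative model of the two check_klass_subtype paths the
// helper above emits. The struct is a stand-in, not the real Klass
// layout: the fast path does a single load at super_check_offset and
// compares; the slow path scans the secondary supers list and, on a
// hit, would also refill the cache.
#include <stddef.h>

struct KlassModel {
  KlassModel*  secondary_super_cache;   // hit cache probed by the slow path
  KlassModel** secondary_supers;        // remaining supertypes
  int          secondary_supers_len;
  size_t       super_check_offset;      // where 'super' expects to be found
};

static bool is_subtype_sketch(KlassModel* sub, KlassModel* super) {
  // fast path: one load and compare at super->super_check_offset
  KlassModel* probe =
      *(KlassModel**)((char*)sub + super->super_check_offset);
  if (probe == super) return true;
  // slow path: linear scan of the secondary supers
  for (int i = 0; i < sub->secondary_supers_len; i++) {
    if (sub->secondary_supers[i] == super) {
      sub->secondary_super_cache = super;
      return true;
    }
  }
  return false;  // fall through on failure, like L_miss above
}
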
2255 2249 //
2256 2250 // Generate checkcasting array copy stub
2257 2251 //
2258 2252 // Input:
2259 2253 // c_rarg0 - source array address
2260 2254 // c_rarg1 - destination array address
2261 2255 // c_rarg2 - element count, treated as ssize_t, can be zero
2262 2256 // c_rarg3 - size_t ckoff (super_check_offset)
2263 2257 // not Win64
2264 2258 // c_rarg4 - oop ckval (super_klass)
2265 2259 // Win64
2266 2260 // rsp+40 - oop ckval (super_klass)
2267 2261 //
2268 2262 // Output:
2269 2263 // rax == 0 - success
2270 2264 // rax == -1^K - failure, where K is partial transfer count
2271 2265 //
2272 2266 address generate_checkcast_copy(const char *name, address *entry,
2273 2267 bool dest_uninitialized = false) {
2274 2268
2275 2269 Label L_load_element, L_store_element, L_do_card_marks, L_done;
2276 2270
2277 2271 // Input registers (after setup_arg_regs)
2278 2272 const Register from = rdi; // source array address
2279 2273 const Register to = rsi; // destination array address
2280 2274 const Register length = rdx; // elements count
2281 2275 const Register ckoff = rcx; // super_check_offset
2282 2276 const Register ckval = r8; // super_klass
2283 2277
2284 2278 // Registers used as temps (r13, r14 are save-on-entry)
2285 2279 const Register end_from = from; // source array end address
2286 2280 const Register end_to = r13; // destination array end address
2287 2281 const Register count = rdx; // -(count_remaining)
2288 2282 const Register r14_length = r14; // saved copy of length
2289 2283 // End pointers are inclusive, and if length is not zero they point
2290 2284 // to the last unit copied: end_to[0] := end_from[0]
2291 2285
2292 2286 const Register rax_oop = rax; // actual oop copied
2293 2287 const Register r11_klass = r11; // oop._klass
2294 2288
2295 2289 //---------------------------------------------------------------
2296 2290 // Assembler stub will be used for this call to arraycopy
2297 2291 // if the two arrays are subtypes of Object[] but the
2298 2292 // destination array type is not equal to or a supertype
2299 2293 // of the source type. Each element must be separately
2300 2294 // checked.
2301 2295
2302 2296 __ align(CodeEntryAlignment);
2303 2297 StubCodeMark mark(this, "StubRoutines", name);
2304 2298 address start = __ pc();
2305 2299
2306 2300 __ enter(); // required for proper stackwalking of RuntimeStub frame
2307 2301
2308 2302 #ifdef ASSERT
2309 2303 // caller guarantees that the arrays really are different
2310 2304 // otherwise, we would have to make conjoint checks
2311 2305 { Label L;
2312 2306 array_overlap_test(L, TIMES_OOP);
2313 2307 __ stop("checkcast_copy within a single array");
2314 2308 __ bind(L);
2315 2309 }
2316 2310 #endif //ASSERT
2317 2311
2318 2312 setup_arg_regs(4); // from => rdi, to => rsi, length => rdx
2319 2313 // ckoff => rcx, ckval => r8
2320 2314 // r9 and r10 may be used to save non-volatile registers
2321 2315 #ifdef _WIN64
2322 2316 // last argument (#4) is on stack on Win64
2323 2317 __ movptr(ckval, Address(rsp, 6 * wordSize));
2324 2318 #endif
2325 2319
2326 2320 // Caller of this entry point must set up the argument registers.
2327 2321 if (entry != NULL) {
2328 2322 *entry = __ pc();
2329 2323 BLOCK_COMMENT("Entry:");
2330 2324 }
2331 2325
2332 2326 // allocate spill slots for r13, r14
2333 2327 enum {
2334 2328 saved_r13_offset,
2335 2329 saved_r14_offset,
2336 2330 saved_rbp_offset
2337 2331 };
2338 2332 __ subptr(rsp, saved_rbp_offset * wordSize);
2339 2333 __ movptr(Address(rsp, saved_r13_offset * wordSize), r13);
2340 2334 __ movptr(Address(rsp, saved_r14_offset * wordSize), r14);
2341 2335
2342 2336 // check that int operands are properly extended to size_t
2343 2337 assert_clean_int(length, rax);
2344 2338 assert_clean_int(ckoff, rax);
2345 2339
2346 2340 #ifdef ASSERT
2347 2341 BLOCK_COMMENT("assert consistent ckoff/ckval");
2348 2342 // The ckoff and ckval must be mutually consistent,
2349 2343 // even though caller generates both.
2350 2344 { Label L;
2351 2345 int sco_offset = in_bytes(Klass::super_check_offset_offset());
2352 2346 __ cmpl(ckoff, Address(ckval, sco_offset));
2353 2347 __ jcc(Assembler::equal, L);
2354 2348 __ stop("super_check_offset inconsistent");
2355 2349 __ bind(L);
2356 2350 }
2357 2351 #endif //ASSERT
2358 2352
2359 2353 // Loop-invariant addresses. They are exclusive end pointers.
2360 2354 Address end_from_addr(from, length, TIMES_OOP, 0);
2361 2355 Address end_to_addr(to, length, TIMES_OOP, 0);
2362 2356 // Loop-variant addresses. They assume post-incremented count < 0.
2363 2357 Address from_element_addr(end_from, count, TIMES_OOP, 0);
2364 2358 Address to_element_addr(end_to, count, TIMES_OOP, 0);
2365 2359
2366 2360 gen_write_ref_array_pre_barrier(to, count, dest_uninitialized);
2367 2361
2368 2362 // Copy from low to high addresses, indexed from the end of each array.
2369 2363 __ lea(end_from, end_from_addr);
2370 2364 __ lea(end_to, end_to_addr);
2371 2365 __ movptr(r14_length, length); // save a copy of the length
2372 2366 assert(length == count, ""); // else fix next line:
2373 2367 __ negptr(count); // negate and test the length
2374 2368 __ jcc(Assembler::notZero, L_load_element);
2375 2369
2376 2370 // Empty array: Nothing to do.
2377 2371 __ xorptr(rax, rax); // return 0 on (trivial) success
2378 2372 __ jmp(L_done);
2379 2373
2380 2374 // ======== begin loop ========
2381 2375 // (Loop is rotated; its entry is L_load_element.)
2382 2376 // Loop control:
2383 2377 // for (count = -count; count != 0; count++)
2384 2378     // Base pointers src, dst are biased by 8*(count-1), to last element.
2385 2379 __ align(OptoLoopAlignment);
2386 2380
2387 2381 __ BIND(L_store_element);
2388 2382 __ store_heap_oop(to_element_addr, rax_oop); // store the oop
2389 2383 __ increment(count); // increment the count toward zero
2390 2384 __ jcc(Assembler::zero, L_do_card_marks);
2391 2385
2392 2386 // ======== loop entry is here ========
2393 2387 __ BIND(L_load_element);
2394 2388 __ load_heap_oop(rax_oop, from_element_addr); // load the oop
2395 2389 __ testptr(rax_oop, rax_oop);
2396 2390 __ jcc(Assembler::zero, L_store_element);
2397 2391
2398 2392 __ load_klass(r11_klass, rax_oop);// query the object klass
2399 2393 generate_type_check(r11_klass, ckoff, ckval, L_store_element);
2400 2394 // ======== end loop ========
2401 2395
2402 2396 // It was a real error; we must depend on the caller to finish the job.
2403 2397 // Register rdx = -1 * number of *remaining* oops, r14 = *total* oops.
2404 2398 // Emit GC store barriers for the oops we have copied (r14 + rdx),
2405 2399 // and report their number to the caller.
2406 - assert_different_registers(rax, r14_length, count, to, end_to, rcx);
2407 - __ lea(end_to, to_element_addr);
2408 - __ addptr(end_to, -heapOopSize); // make an inclusive end pointer
2409 - gen_write_ref_array_post_barrier(to, end_to, rscratch1);
2410 - __ movptr(rax, r14_length); // original oops
2411 - __ addptr(rax, count); // K = (original - remaining) oops
2412 - __ notptr(rax); // report (-1^K) to caller
2413 - __ jmp(L_done);
2400 + assert_different_registers(rax, r14_length, count, to, end_to, rcx, rscratch1);
2401 + Label L_post_barrier;
2402 + __ addptr(r14_length, count); // K = (original - remaining) oops
2403 + __ movptr(rax, r14_length); // save the value
2404 + __ notptr(rax); // report (-1^K) to caller (does not affect flags)
2405 + __ jccb(Assembler::notZero, L_post_barrier);
2406 + __ jmp(L_done); // K == 0, nothing was copied, skip post barrier
2414 2407
2415 2408 // Come here on success only.
2416 2409 __ BIND(L_do_card_marks);
2417 - __ addptr(end_to, -heapOopSize); // make an inclusive end pointer
2418 - gen_write_ref_array_post_barrier(to, end_to, rscratch1);
2419 - __ xorptr(rax, rax); // return 0 on success
2410 + __ xorptr(rax, rax); // return 0 on success
2420 2411
2412 + __ BIND(L_post_barrier);
2413 + gen_write_ref_array_post_barrier(to, r14_length, rscratch1);
2414 +
2421 2415 // Common exit point (success or failure).
2422 2416 __ BIND(L_done);
2423 2417 __ movptr(r13, Address(rsp, saved_r13_offset * wordSize));
2424 2418 __ movptr(r14, Address(rsp, saved_r14_offset * wordSize));
2425 2419 restore_arg_regs();
2426 2420 inc_counter_np(SharedRuntime::_checkcast_array_copy_ctr); // Update counter after rscratch1 is free
2427 2421 __ leave(); // required for proper stackwalking of RuntimeStub frame
2428 2422 __ ret(0);
2429 2423
2430 2424 return start;
2431 2425 }
2432 2426
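
// The exit-path arithmetic above, restated as a hedged C++ sketch (the
// function name is illustrative): 'negative_remaining' models the
// count register, which holds minus the elements not yet copied, so
// K = total + negative_remaining is the number of oops actually
// stored. The fix in this change is visible here: the post barrier
// covers to[0 .. K-1] and is skipped entirely when K == 0, so an empty
// transfer never touches card-table memory before the array.
#include <stdint.h>

static intptr_t checkcast_result_sketch(intptr_t total,
                                        intptr_t negative_remaining) {
  intptr_t K = total + negative_remaining;     // (original - remaining) oops
  intptr_t result = (negative_remaining == 0)
      ? 0                                      // success: rax == 0
      : ~K;                                    // failure: rax == -1^K
  if (K != 0) {
    // gen_write_ref_array_post_barrier(to, K, tmp) would be emitted here
  }
  return result;
}
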
2433 2427 //
2434 2428 // Generate 'unsafe' array copy stub
2435 2429 // Though just as safe as the other stubs, it takes an unscaled
2436 2430 // size_t argument instead of an element count.
2437 2431 //
2438 2432 // Input:
2439 2433 // c_rarg0 - source array address
2440 2434 // c_rarg1 - destination array address
2441 2435 // c_rarg2 - byte count, treated as ssize_t, can be zero
2442 2436 //
2443 2437 // Examines the alignment of the operands and dispatches
2444 2438 // to a long, int, short, or byte copy loop.
2445 2439 //
2446 2440 address generate_unsafe_copy(const char *name,
2447 2441 address byte_copy_entry, address short_copy_entry,
2448 2442 address int_copy_entry, address long_copy_entry) {
2449 2443
2450 2444 Label L_long_aligned, L_int_aligned, L_short_aligned;
2451 2445
2452 2446 // Input registers (before setup_arg_regs)
2453 2447 const Register from = c_rarg0; // source array address
2454 2448 const Register to = c_rarg1; // destination array address
2455 2449 const Register size = c_rarg2; // byte count (size_t)
2456 2450
2457 2451 // Register used as a temp
2458 2452 const Register bits = rax; // test copy of low bits
2459 2453
2460 2454 __ align(CodeEntryAlignment);
2461 2455 StubCodeMark mark(this, "StubRoutines", name);
2462 2456 address start = __ pc();
2463 2457
2464 2458 __ enter(); // required for proper stackwalking of RuntimeStub frame
2465 2459
2466 2460 // bump this on entry, not on exit:
2467 2461 inc_counter_np(SharedRuntime::_unsafe_array_copy_ctr);
2468 2462
2469 2463 __ mov(bits, from);
2470 2464 __ orptr(bits, to);
2471 2465 __ orptr(bits, size);
2472 2466
2473 2467 __ testb(bits, BytesPerLong-1);
2474 2468 __ jccb(Assembler::zero, L_long_aligned);
2475 2469
2476 2470 __ testb(bits, BytesPerInt-1);
2477 2471 __ jccb(Assembler::zero, L_int_aligned);
2478 2472
2479 2473 __ testb(bits, BytesPerShort-1);
2480 2474 __ jump_cc(Assembler::notZero, RuntimeAddress(byte_copy_entry));
2481 2475
2482 2476 __ BIND(L_short_aligned);
2483 2477 __ shrptr(size, LogBytesPerShort); // size => short_count
2484 2478 __ jump(RuntimeAddress(short_copy_entry));
2485 2479
2486 2480 __ BIND(L_int_aligned);
2487 2481 __ shrptr(size, LogBytesPerInt); // size => int_count
2488 2482 __ jump(RuntimeAddress(int_copy_entry));
2489 2483
2490 2484 __ BIND(L_long_aligned);
2491 2485 __ shrptr(size, LogBytesPerLong); // size => qword_count
2492 2486 __ jump(RuntimeAddress(long_copy_entry));
2493 2487
2494 2488 return start;
2495 2489 }
2496 2490
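
// The dispatch above, as a C++ sketch: OR the two addresses and the
// byte count together, then one set of low-bit tests picks the widest
// element width that keeps every access aligned. The function pointers
// stand in for the real stub entry addresses.
#include <stdint.h>
#include <stddef.h>

typedef void (*copy_stub_t)(void* from, void* to, size_t count);

static void unsafe_copy_dispatch_sketch(void* from, void* to, size_t size,
                                        copy_stub_t byte_copy,
                                        copy_stub_t short_copy,
                                        copy_stub_t int_copy,
                                        copy_stub_t long_copy) {
  uintptr_t bits = (uintptr_t)from | (uintptr_t)to | (uintptr_t)size;
  if ((bits & 7) == 0) {            // everything 8-byte aligned
    long_copy(from, to, size >> 3);       // size => qword_count
  } else if ((bits & 3) == 0) {     // everything 4-byte aligned
    int_copy(from, to, size >> 2);        // size => int_count
  } else if ((bits & 1) == 0) {     // everything 2-byte aligned
    short_copy(from, to, size >> 1);      // size => short_count
  } else {
    byte_copy(from, to, size);
  }
}
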
2497 2491 // Perform range checks on the proposed arraycopy.
2498 2492 // Kills temp, but nothing else.
2499 2493 // Also, clean the sign bits of src_pos and dst_pos.
2500 2494 void arraycopy_range_checks(Register src, // source array oop (c_rarg0)
2501 2495 Register src_pos, // source position (c_rarg1)
2502 2496     Register dst, // destination array oop (c_rarg2)
2503 2497 Register dst_pos, // destination position (c_rarg3)
2504 2498 Register length,
2505 2499 Register temp,
2506 2500 Label& L_failed) {
2507 2501 BLOCK_COMMENT("arraycopy_range_checks:");
2508 2502
2509 2503 // if (src_pos + length > arrayOop(src)->length()) FAIL;
2510 2504 __ movl(temp, length);
2511 2505 __ addl(temp, src_pos); // src_pos + length
2512 2506 __ cmpl(temp, Address(src, arrayOopDesc::length_offset_in_bytes()));
2513 2507 __ jcc(Assembler::above, L_failed);
2514 2508
2515 2509 // if (dst_pos + length > arrayOop(dst)->length()) FAIL;
2516 2510 __ movl(temp, length);
2517 2511 __ addl(temp, dst_pos); // dst_pos + length
2518 2512 __ cmpl(temp, Address(dst, arrayOopDesc::length_offset_in_bytes()));
2519 2513 __ jcc(Assembler::above, L_failed);
2520 2514
2521 2515 // Have to clean up high 32-bits of 'src_pos' and 'dst_pos'.
2522 2516 // Move with sign extension can be used since they are positive.
2523 2517 __ movslq(src_pos, src_pos);
2524 2518 __ movslq(dst_pos, dst_pos);
2525 2519
2526 2520 BLOCK_COMMENT("arraycopy_range_checks done");
2527 2521 }
2528 2522
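
// The same checks in C++, to make the unsigned trick explicit: the
// 32-bit sums are compared with 'above' (an unsigned compare), so a
// sum that wrapped also fails. Sketch only; in the real code the
// array lengths come from arrayOopDesc.
#include <stdint.h>

static bool arraycopy_range_checks_sketch(uint32_t src_len, uint32_t dst_len,
                                          uint32_t src_pos, uint32_t dst_pos,
                                          uint32_t length) {
  if (src_pos + length > src_len) return false;  // unsigned, like jcc(above)
  if (dst_pos + length > dst_len) return false;
  return true;
}
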
2529 2523 //
2530 2524 // Generate generic array copy stubs
2531 2525 //
2532 2526 // Input:
2533 2527 // c_rarg0 - src oop
2534 2528 // c_rarg1 - src_pos (32-bits)
2535 2529 // c_rarg2 - dst oop
2536 2530 // c_rarg3 - dst_pos (32-bits)
2537 2531 // not Win64
2538 2532 // c_rarg4 - element count (32-bits)
2539 2533 // Win64
2540 2534 // rsp+40 - element count (32-bits)
2541 2535 //
2542 2536 // Output:
2543 2537 // rax == 0 - success
2544 2538 // rax == -1^K - failure, where K is partial transfer count
2545 2539 //
2546 2540 address generate_generic_copy(const char *name,
2547 2541 address byte_copy_entry, address short_copy_entry,
2548 2542 address int_copy_entry, address oop_copy_entry,
2549 2543 address long_copy_entry, address checkcast_copy_entry) {
2550 2544
2551 2545 Label L_failed, L_failed_0, L_objArray;
2552 2546 Label L_copy_bytes, L_copy_shorts, L_copy_ints, L_copy_longs;
2553 2547
2554 2548 // Input registers
2555 2549 const Register src = c_rarg0; // source array oop
2556 2550 const Register src_pos = c_rarg1; // source position
2557 2551 const Register dst = c_rarg2; // destination array oop
2558 2552 const Register dst_pos = c_rarg3; // destination position
2559 2553 #ifndef _WIN64
2560 2554 const Register length = c_rarg4;
2561 2555 #else
2562 2556 const Address length(rsp, 6 * wordSize); // elements count is on stack on Win64
2563 2557 #endif
2564 2558
2565 2559 { int modulus = CodeEntryAlignment;
2566 2560 int target = modulus - 5; // 5 = sizeof jmp(L_failed)
2567 2561 int advance = target - (__ offset() % modulus);
2568 2562 if (advance < 0) advance += modulus;
2569 2563 if (advance > 0) __ nop(advance);
2570 2564 }
2571 2565 StubCodeMark mark(this, "StubRoutines", name);
2572 2566
2573 2567 // Short-hop target to L_failed. Makes for denser prologue code.
2574 2568 __ BIND(L_failed_0);
2575 2569 __ jmp(L_failed);
2576 2570 assert(__ offset() % CodeEntryAlignment == 0, "no further alignment needed");
2577 2571
2578 2572 __ align(CodeEntryAlignment);
2579 2573 address start = __ pc();
2580 2574
2581 2575 __ enter(); // required for proper stackwalking of RuntimeStub frame
2582 2576
2583 2577 // bump this on entry, not on exit:
2584 2578 inc_counter_np(SharedRuntime::_generic_array_copy_ctr);
2585 2579
2586 2580 //-----------------------------------------------------------------------
2587 2581 // Assembler stub will be used for this call to arraycopy
2588 2582 // if the following conditions are met:
2589 2583 //
2590 2584 // (1) src and dst must not be null.
2591 2585 // (2) src_pos must not be negative.
2592 2586 // (3) dst_pos must not be negative.
2593 2587 // (4) length must not be negative.
2594 2588 // (5) src klass and dst klass should be the same and not NULL.
2595 2589 // (6) src and dst should be arrays.
2596 2590 // (7) src_pos + length must not exceed length of src.
2597 2591 // (8) dst_pos + length must not exceed length of dst.
2598 2592 //
2599 2593
2600 2594 // if (src == NULL) return -1;
2601 2595 __ testptr(src, src); // src oop
2602 2596 size_t j1off = __ offset();
2603 2597 __ jccb(Assembler::zero, L_failed_0);
2604 2598
2605 2599 // if (src_pos < 0) return -1;
2606 2600 __ testl(src_pos, src_pos); // src_pos (32-bits)
2607 2601 __ jccb(Assembler::negative, L_failed_0);
2608 2602
2609 2603 // if (dst == NULL) return -1;
2610 2604 __ testptr(dst, dst); // dst oop
2611 2605 __ jccb(Assembler::zero, L_failed_0);
2612 2606
2613 2607 // if (dst_pos < 0) return -1;
2614 2608 __ testl(dst_pos, dst_pos); // dst_pos (32-bits)
2615 2609 size_t j4off = __ offset();
2616 2610 __ jccb(Assembler::negative, L_failed_0);
2617 2611
2618 2612 // The first four tests are very dense code,
2619 2613 // but not quite dense enough to put four
2620 2614 // jumps in a 16-byte instruction fetch buffer.
2621 2615     // That's good, because some branch predictors
2622 2616 // do not like jumps so close together.
2623 2617 // Make sure of this.
2624 2618 guarantee(((j1off ^ j4off) & ~15) != 0, "I$ line of 1st & 4th jumps");
2625 2619
2626 2620 // registers used as temp
2627 2621 const Register r11_length = r11; // elements count to copy
2628 2622 const Register r10_src_klass = r10; // array klass
2629 2623
2630 2624 // if (length < 0) return -1;
2631 2625 __ movl(r11_length, length); // length (elements count, 32-bits value)
2632 2626 __ testl(r11_length, r11_length);
2633 2627 __ jccb(Assembler::negative, L_failed_0);
2634 2628
2635 2629 __ load_klass(r10_src_klass, src);
2636 2630 #ifdef ASSERT
2637 2631 // assert(src->klass() != NULL);
2638 2632 {
2639 2633 BLOCK_COMMENT("assert klasses not null {");
2640 2634 Label L1, L2;
2641 2635 __ testptr(r10_src_klass, r10_src_klass);
2642 2636 __ jcc(Assembler::notZero, L2); // it is broken if klass is NULL
2643 2637 __ bind(L1);
2644 2638 __ stop("broken null klass");
2645 2639 __ bind(L2);
2646 2640 __ load_klass(rax, dst);
2647 2641 __ cmpq(rax, 0);
2648 2642 __ jcc(Assembler::equal, L1); // this would be broken also
2649 2643 BLOCK_COMMENT("} assert klasses not null done");
2650 2644 }
2651 2645 #endif
2652 2646
2653 2647 // Load layout helper (32-bits)
2654 2648 //
2655 2649     //  |array_tag|     | header_size | element_type |     |log2_element_size|
2656 2650     // 32        30    24            16              8     2                 0
2657 2651 //
2658 2652 // array_tag: typeArray = 0x3, objArray = 0x2, non-array = 0x0
2659 2653 //
2660 2654
2661 2655 const int lh_offset = in_bytes(Klass::layout_helper_offset());
2662 2656
2663 2657 // Handle objArrays completely differently...
2664 2658 const jint objArray_lh = Klass::array_layout_helper(T_OBJECT);
2665 2659 __ cmpl(Address(r10_src_klass, lh_offset), objArray_lh);
2666 2660 __ jcc(Assembler::equal, L_objArray);
2667 2661
2668 2662 // if (src->klass() != dst->klass()) return -1;
2669 2663 __ load_klass(rax, dst);
2670 2664 __ cmpq(r10_src_klass, rax);
2671 2665 __ jcc(Assembler::notEqual, L_failed);
2672 2666
2673 2667 const Register rax_lh = rax; // layout helper
2674 2668 __ movl(rax_lh, Address(r10_src_klass, lh_offset));
2675 2669
2676 2670 // if (!src->is_Array()) return -1;
2677 2671 __ cmpl(rax_lh, Klass::_lh_neutral_value);
2678 2672 __ jcc(Assembler::greaterEqual, L_failed);
2679 2673
2680 2674 // At this point, it is known to be a typeArray (array_tag 0x3).
2681 2675 #ifdef ASSERT
2682 2676 {
2683 2677 BLOCK_COMMENT("assert primitive array {");
2684 2678 Label L;
2685 2679 __ cmpl(rax_lh, (Klass::_lh_array_tag_type_value << Klass::_lh_array_tag_shift));
2686 2680 __ jcc(Assembler::greaterEqual, L);
2687 2681 __ stop("must be a primitive array");
2688 2682 __ bind(L);
2689 2683 BLOCK_COMMENT("} assert primitive array done");
2690 2684 }
2691 2685 #endif
2692 2686
2693 2687 arraycopy_range_checks(src, src_pos, dst, dst_pos, r11_length,
2694 2688 r10, L_failed);
2695 2689
2696 2690 // typeArrayKlass
2697 2691 //
2698 2692 // src_addr = (src + array_header_in_bytes()) + (src_pos << log2elemsize);
2699 2693 // dst_addr = (dst + array_header_in_bytes()) + (dst_pos << log2elemsize);
2700 2694 //
2701 2695
2702 2696 const Register r10_offset = r10; // array offset
2703 2697 const Register rax_elsize = rax_lh; // element size
2704 2698
2705 2699 __ movl(r10_offset, rax_lh);
2706 2700 __ shrl(r10_offset, Klass::_lh_header_size_shift);
2707 2701 __ andptr(r10_offset, Klass::_lh_header_size_mask); // array_offset
2708 2702 __ addptr(src, r10_offset); // src array offset
2709 2703 __ addptr(dst, r10_offset); // dst array offset
2710 2704 BLOCK_COMMENT("choose copy loop based on element size");
2711 2705 __ andl(rax_lh, Klass::_lh_log2_element_size_mask); // rax_lh -> rax_elsize
2712 2706
2713 2707 // next registers should be set before the jump to corresponding stub
2714 2708 const Register from = c_rarg0; // source array address
2715 2709 const Register to = c_rarg1; // destination array address
2716 2710 const Register count = c_rarg2; // elements count
2717 2711
2718 2712 // 'from', 'to', 'count' registers should be set in such order
2719 2713 // since they are the same as 'src', 'src_pos', 'dst'.
2720 2714
2721 2715 __ BIND(L_copy_bytes);
2722 2716 __ cmpl(rax_elsize, 0);
2723 2717 __ jccb(Assembler::notEqual, L_copy_shorts);
2724 2718 __ lea(from, Address(src, src_pos, Address::times_1, 0));// src_addr
2725 2719 __ lea(to, Address(dst, dst_pos, Address::times_1, 0));// dst_addr
2726 2720 __ movl2ptr(count, r11_length); // length
2727 2721 __ jump(RuntimeAddress(byte_copy_entry));
2728 2722
2729 2723 __ BIND(L_copy_shorts);
2730 2724 __ cmpl(rax_elsize, LogBytesPerShort);
2731 2725 __ jccb(Assembler::notEqual, L_copy_ints);
2732 2726 __ lea(from, Address(src, src_pos, Address::times_2, 0));// src_addr
2733 2727 __ lea(to, Address(dst, dst_pos, Address::times_2, 0));// dst_addr
2734 2728 __ movl2ptr(count, r11_length); // length
2735 2729 __ jump(RuntimeAddress(short_copy_entry));
2736 2730
2737 2731 __ BIND(L_copy_ints);
2738 2732 __ cmpl(rax_elsize, LogBytesPerInt);
2739 2733 __ jccb(Assembler::notEqual, L_copy_longs);
2740 2734 __ lea(from, Address(src, src_pos, Address::times_4, 0));// src_addr
2741 2735 __ lea(to, Address(dst, dst_pos, Address::times_4, 0));// dst_addr
2742 2736 __ movl2ptr(count, r11_length); // length
2743 2737 __ jump(RuntimeAddress(int_copy_entry));
2744 2738
2745 2739 __ BIND(L_copy_longs);
2746 2740 #ifdef ASSERT
2747 2741 {
2748 2742 BLOCK_COMMENT("assert long copy {");
2749 2743 Label L;
2750 2744 __ cmpl(rax_elsize, LogBytesPerLong);
2751 2745 __ jcc(Assembler::equal, L);
2752 2746 __ stop("must be long copy, but elsize is wrong");
2753 2747 __ bind(L);
2754 2748 BLOCK_COMMENT("} assert long copy done");
2755 2749 }
2756 2750 #endif
2757 2751 __ lea(from, Address(src, src_pos, Address::times_8, 0));// src_addr
2758 2752 __ lea(to, Address(dst, dst_pos, Address::times_8, 0));// dst_addr
2759 2753 __ movl2ptr(count, r11_length); // length
2760 2754 __ jump(RuntimeAddress(long_copy_entry));
2761 2755
2762 2756 // objArrayKlass
2763 2757 __ BIND(L_objArray);
2764 2758 // live at this point: r10_src_klass, r11_length, src[_pos], dst[_pos]
2765 2759
2766 2760 Label L_plain_copy, L_checkcast_copy;
2767 2761 // test array classes for subtyping
2768 2762 __ load_klass(rax, dst);
2769 2763 __ cmpq(r10_src_klass, rax); // usual case is exact equality
2770 2764 __ jcc(Assembler::notEqual, L_checkcast_copy);
2771 2765
2772 2766 // Identically typed arrays can be copied without element-wise checks.
2773 2767 arraycopy_range_checks(src, src_pos, dst, dst_pos, r11_length,
2774 2768 r10, L_failed);
2775 2769
2776 2770 __ lea(from, Address(src, src_pos, TIMES_OOP,
2777 2771 arrayOopDesc::base_offset_in_bytes(T_OBJECT))); // src_addr
2778 2772 __ lea(to, Address(dst, dst_pos, TIMES_OOP,
2779 2773 arrayOopDesc::base_offset_in_bytes(T_OBJECT))); // dst_addr
2780 2774 __ movl2ptr(count, r11_length); // length
2781 2775 __ BIND(L_plain_copy);
2782 2776 __ jump(RuntimeAddress(oop_copy_entry));
2783 2777
2784 2778 __ BIND(L_checkcast_copy);
2785 2779 // live at this point: r10_src_klass, r11_length, rax (dst_klass)
2786 2780 {
2787 2781 // Before looking at dst.length, make sure dst is also an objArray.
2788 2782 __ cmpl(Address(rax, lh_offset), objArray_lh);
2789 2783 __ jcc(Assembler::notEqual, L_failed);
2790 2784
2791 2785 // It is safe to examine both src.length and dst.length.
2792 2786 arraycopy_range_checks(src, src_pos, dst, dst_pos, r11_length,
2793 2787 rax, L_failed);
2794 2788
2795 2789 const Register r11_dst_klass = r11;
2796 2790 __ load_klass(r11_dst_klass, dst); // reload
2797 2791
2798 2792 // Marshal the base address arguments now, freeing registers.
2799 2793 __ lea(from, Address(src, src_pos, TIMES_OOP,
2800 2794 arrayOopDesc::base_offset_in_bytes(T_OBJECT)));
2801 2795 __ lea(to, Address(dst, dst_pos, TIMES_OOP,
2802 2796 arrayOopDesc::base_offset_in_bytes(T_OBJECT)));
2803 2797 __ movl(count, length); // length (reloaded)
2804 2798 Register sco_temp = c_rarg3; // this register is free now
2805 2799 assert_different_registers(from, to, count, sco_temp,
2806 2800 r11_dst_klass, r10_src_klass);
2807 2801 assert_clean_int(count, sco_temp);
2808 2802
2809 2803 // Generate the type check.
2810 2804 const int sco_offset = in_bytes(Klass::super_check_offset_offset());
2811 2805 __ movl(sco_temp, Address(r11_dst_klass, sco_offset));
2812 2806 assert_clean_int(sco_temp, rax);
2813 2807 generate_type_check(r10_src_klass, sco_temp, r11_dst_klass, L_plain_copy);
2814 2808
2815 2809 // Fetch destination element klass from the objArrayKlass header.
2816 2810 int ek_offset = in_bytes(objArrayKlass::element_klass_offset());
2817 2811 __ movptr(r11_dst_klass, Address(r11_dst_klass, ek_offset));
2818 2812 __ movl( sco_temp, Address(r11_dst_klass, sco_offset));
2819 2813 assert_clean_int(sco_temp, rax);
2820 2814
2821 2815 // the checkcast_copy loop needs two extra arguments:
2822 2816 assert(c_rarg3 == sco_temp, "#3 already in place");
2823 2817 // Set up arguments for checkcast_copy_entry.
2824 2818 setup_arg_regs(4);
2825 2819 __ movptr(r8, r11_dst_klass); // dst.klass.element_klass, r8 is c_rarg4 on Linux/Solaris
2826 2820 __ jump(RuntimeAddress(checkcast_copy_entry));
2827 2821 }
2828 2822
2829 2823 __ BIND(L_failed);
2830 2824 __ xorptr(rax, rax);
2831 2825 __ notptr(rax); // return -1
2832 2826 __ leave(); // required for proper stackwalking of RuntimeStub frame
2833 2827 __ ret(0);
2834 2828
2835 2829 return start;
2836 2830 }
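
[Editorial note] As a reading aid: a minimal C sketch of the dispatch the lea/jump sequences above implement. Everything here (copy_stub_t, dispatch_primitive_copy) is hypothetical scaffolding, not HotSpot API; the real stub tail-jumps to the registered copy entries with (from, to, count) already in argument registers.

    #include <stddef.h>
    #include <stdint.h>

    typedef void (*copy_stub_t)(void *from, void *to, size_t count);

    /* log2_elsize comes from the layout helper: 0=byte, 1=short, 2=int, 3=long */
    static void dispatch_primitive_copy(uint8_t *src_base, size_t src_pos,
                                        uint8_t *dst_base, size_t dst_pos,
                                        size_t count, int log2_elsize,
                                        copy_stub_t stubs[4] /* byte..long */) {
      size_t scale = (size_t)1 << log2_elsize;   // 1, 2, 4, or 8 bytes
      void *from = src_base + src_pos * scale;   // lea(from, ...) above
      void *to   = dst_base + dst_pos * scale;   // lea(to, ...) above
      stubs[log2_elsize](from, to, count);       // tail-jump in the stub
    }
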
2837 2831
2838 2832 void generate_arraycopy_stubs() {
2839 2833 address entry;
2840 2834 address entry_jbyte_arraycopy;
2841 2835 address entry_jshort_arraycopy;
2842 2836 address entry_jint_arraycopy;
2843 2837 address entry_oop_arraycopy;
2844 2838 address entry_jlong_arraycopy;
2845 2839 address entry_checkcast_arraycopy;
2846 2840
2847 2841 StubRoutines::_jbyte_disjoint_arraycopy = generate_disjoint_byte_copy(false, &entry,
2848 2842 "jbyte_disjoint_arraycopy");
2849 2843 StubRoutines::_jbyte_arraycopy = generate_conjoint_byte_copy(false, entry, &entry_jbyte_arraycopy,
2850 2844 "jbyte_arraycopy");
2851 2845
2852 2846 StubRoutines::_jshort_disjoint_arraycopy = generate_disjoint_short_copy(false, &entry,
2853 2847 "jshort_disjoint_arraycopy");
2854 2848 StubRoutines::_jshort_arraycopy = generate_conjoint_short_copy(false, entry, &entry_jshort_arraycopy,
2855 2849 "jshort_arraycopy");
2856 2850
2857 2851 StubRoutines::_jint_disjoint_arraycopy = generate_disjoint_int_oop_copy(false, false, &entry,
2858 2852 "jint_disjoint_arraycopy");
2859 2853 StubRoutines::_jint_arraycopy = generate_conjoint_int_oop_copy(false, false, entry,
2860 2854 &entry_jint_arraycopy, "jint_arraycopy");
2861 2855
2862 2856 StubRoutines::_jlong_disjoint_arraycopy = generate_disjoint_long_oop_copy(false, false, &entry,
2863 2857 "jlong_disjoint_arraycopy");
2864 2858 StubRoutines::_jlong_arraycopy = generate_conjoint_long_oop_copy(false, false, entry,
2865 2859 &entry_jlong_arraycopy, "jlong_arraycopy");
2866 2860
2867 2861
2868 2862 if (UseCompressedOops) {
2869 2863 StubRoutines::_oop_disjoint_arraycopy = generate_disjoint_int_oop_copy(false, true, &entry,
2870 2864 "oop_disjoint_arraycopy");
2871 2865 StubRoutines::_oop_arraycopy = generate_conjoint_int_oop_copy(false, true, entry,
2872 2866 &entry_oop_arraycopy, "oop_arraycopy");
2873 2867 StubRoutines::_oop_disjoint_arraycopy_uninit = generate_disjoint_int_oop_copy(false, true, &entry,
2874 2868 "oop_disjoint_arraycopy_uninit",
2875 2869 /*dest_uninitialized*/true);
2876 2870 StubRoutines::_oop_arraycopy_uninit = generate_conjoint_int_oop_copy(false, true, entry,
2877 2871 NULL, "oop_arraycopy_uninit",
2878 2872 /*dest_uninitialized*/true);
2879 2873 } else {
2880 2874 StubRoutines::_oop_disjoint_arraycopy = generate_disjoint_long_oop_copy(false, true, &entry,
2881 2875 "oop_disjoint_arraycopy");
2882 2876 StubRoutines::_oop_arraycopy = generate_conjoint_long_oop_copy(false, true, entry,
2883 2877 &entry_oop_arraycopy, "oop_arraycopy");
2884 2878 StubRoutines::_oop_disjoint_arraycopy_uninit = generate_disjoint_long_oop_copy(false, true, &entry,
2885 2879 "oop_disjoint_arraycopy_uninit",
2886 2880 /*dest_uninitialized*/true);
2887 2881 StubRoutines::_oop_arraycopy_uninit = generate_conjoint_long_oop_copy(false, true, entry,
2888 2882 NULL, "oop_arraycopy_uninit",
2889 2883 /*dest_uninitialized*/true);
2890 2884 }
2891 2885
2892 2886 StubRoutines::_checkcast_arraycopy = generate_checkcast_copy("checkcast_arraycopy", &entry_checkcast_arraycopy);
2893 2887 StubRoutines::_checkcast_arraycopy_uninit = generate_checkcast_copy("checkcast_arraycopy_uninit", NULL,
2894 2888 /*dest_uninitialized*/true);
2895 2889
2896 2890 StubRoutines::_unsafe_arraycopy = generate_unsafe_copy("unsafe_arraycopy",
2897 2891 entry_jbyte_arraycopy,
2898 2892 entry_jshort_arraycopy,
2899 2893 entry_jint_arraycopy,
2900 2894 entry_jlong_arraycopy);
2901 2895 StubRoutines::_generic_arraycopy = generate_generic_copy("generic_arraycopy",
2902 2896 entry_jbyte_arraycopy,
2903 2897 entry_jshort_arraycopy,
2904 2898 entry_jint_arraycopy,
2905 2899 entry_oop_arraycopy,
2906 2900 entry_jlong_arraycopy,
2907 2901 entry_checkcast_arraycopy);
2908 2902
2909 2903 StubRoutines::_jbyte_fill = generate_fill(T_BYTE, false, "jbyte_fill");
2910 2904 StubRoutines::_jshort_fill = generate_fill(T_SHORT, false, "jshort_fill");
2911 2905 StubRoutines::_jint_fill = generate_fill(T_INT, false, "jint_fill");
2912 2906 StubRoutines::_arrayof_jbyte_fill = generate_fill(T_BYTE, true, "arrayof_jbyte_fill");
2913 2907 StubRoutines::_arrayof_jshort_fill = generate_fill(T_SHORT, true, "arrayof_jshort_fill");
2914 2908 StubRoutines::_arrayof_jint_fill = generate_fill(T_INT, true, "arrayof_jint_fill");
2915 2909
2916 2910 // We don't generate specialized code for HeapWord-aligned source
2917 2911 // arrays, so just use the code we've already generated
2918 2912 StubRoutines::_arrayof_jbyte_disjoint_arraycopy = StubRoutines::_jbyte_disjoint_arraycopy;
2919 2913 StubRoutines::_arrayof_jbyte_arraycopy = StubRoutines::_jbyte_arraycopy;
2920 2914
2921 2915 StubRoutines::_arrayof_jshort_disjoint_arraycopy = StubRoutines::_jshort_disjoint_arraycopy;
2922 2916 StubRoutines::_arrayof_jshort_arraycopy = StubRoutines::_jshort_arraycopy;
2923 2917
2924 2918 StubRoutines::_arrayof_jint_disjoint_arraycopy = StubRoutines::_jint_disjoint_arraycopy;
2925 2919 StubRoutines::_arrayof_jint_arraycopy = StubRoutines::_jint_arraycopy;
2926 2920
2927 2921 StubRoutines::_arrayof_jlong_disjoint_arraycopy = StubRoutines::_jlong_disjoint_arraycopy;
2928 2922 StubRoutines::_arrayof_jlong_arraycopy = StubRoutines::_jlong_arraycopy;
2929 2923
2930 2924 StubRoutines::_arrayof_oop_disjoint_arraycopy = StubRoutines::_oop_disjoint_arraycopy;
2931 2925 StubRoutines::_arrayof_oop_arraycopy = StubRoutines::_oop_arraycopy;
2932 2926
2933 2927 StubRoutines::_arrayof_oop_disjoint_arraycopy_uninit = StubRoutines::_oop_disjoint_arraycopy_uninit;
2934 2928 StubRoutines::_arrayof_oop_arraycopy_uninit = StubRoutines::_oop_arraycopy_uninit;
2935 2929 }
2936 2930
2937 2931 void generate_math_stubs() {
2938 2932 {
2939 2933 StubCodeMark mark(this, "StubRoutines", "log");
2940 2934 StubRoutines::_intrinsic_log = (double (*)(double)) __ pc();
2941 2935
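          // The argument arrives in xmm0, but the x87 flog instruction works on
          // the FPU stack, so the stub spills xmm0 to memory, loads it with
          // fld_d, computes, and moves the result back to xmm0 the same way.
          // The log10, sin, cos, tan, exp and pow stubs below repeat this pattern.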
2942 2936 __ subq(rsp, 8);
2943 2937 __ movdbl(Address(rsp, 0), xmm0);
2944 2938 __ fld_d(Address(rsp, 0));
2945 2939 __ flog();
2946 2940 __ fstp_d(Address(rsp, 0));
2947 2941 __ movdbl(xmm0, Address(rsp, 0));
2948 2942 __ addq(rsp, 8);
2949 2943 __ ret(0);
2950 2944 }
2951 2945 {
2952 2946 StubCodeMark mark(this, "StubRoutines", "log10");
2953 2947 StubRoutines::_intrinsic_log10 = (double (*)(double)) __ pc();
2954 2948
2955 2949 __ subq(rsp, 8);
2956 2950 __ movdbl(Address(rsp, 0), xmm0);
2957 2951 __ fld_d(Address(rsp, 0));
2958 2952 __ flog10();
2959 2953 __ fstp_d(Address(rsp, 0));
2960 2954 __ movdbl(xmm0, Address(rsp, 0));
2961 2955 __ addq(rsp, 8);
2962 2956 __ ret(0);
2963 2957 }
2964 2958 {
2965 2959 StubCodeMark mark(this, "StubRoutines", "sin");
2966 2960 StubRoutines::_intrinsic_sin = (double (*)(double)) __ pc();
2967 2961
2968 2962 __ subq(rsp, 8);
2969 2963 __ movdbl(Address(rsp, 0), xmm0);
2970 2964 __ fld_d(Address(rsp, 0));
2971 2965 __ trigfunc('s');
2972 2966 __ fstp_d(Address(rsp, 0));
2973 2967 __ movdbl(xmm0, Address(rsp, 0));
2974 2968 __ addq(rsp, 8);
2975 2969 __ ret(0);
2976 2970 }
2977 2971 {
2978 2972 StubCodeMark mark(this, "StubRoutines", "cos");
2979 2973 StubRoutines::_intrinsic_cos = (double (*)(double)) __ pc();
2980 2974
2981 2975 __ subq(rsp, 8);
2982 2976 __ movdbl(Address(rsp, 0), xmm0);
2983 2977 __ fld_d(Address(rsp, 0));
2984 2978 __ trigfunc('c');
2985 2979 __ fstp_d(Address(rsp, 0));
2986 2980 __ movdbl(xmm0, Address(rsp, 0));
2987 2981 __ addq(rsp, 8);
2988 2982 __ ret(0);
2989 2983 }
2990 2984 {
2991 2985 StubCodeMark mark(this, "StubRoutines", "tan");
2992 2986 StubRoutines::_intrinsic_tan = (double (*)(double)) __ pc();
2993 2987
2994 2988 __ subq(rsp, 8);
2995 2989 __ movdbl(Address(rsp, 0), xmm0);
2996 2990 __ fld_d(Address(rsp, 0));
2997 2991 __ trigfunc('t');
2998 2992 __ fstp_d(Address(rsp, 0));
2999 2993 __ movdbl(xmm0, Address(rsp, 0));
3000 2994 __ addq(rsp, 8);
3001 2995 __ ret(0);
3002 2996 }
3003 2997 {
3004 2998 StubCodeMark mark(this, "StubRoutines", "exp");
3005 2999 StubRoutines::_intrinsic_exp = (double (*)(double)) __ pc();
3006 3000
3007 3001 __ subq(rsp, 8);
3008 3002 __ movdbl(Address(rsp, 0), xmm0);
3009 3003 __ fld_d(Address(rsp, 0));
3010 3004 __ exp_with_fallback(0);
3011 3005 __ fstp_d(Address(rsp, 0));
3012 3006 __ movdbl(xmm0, Address(rsp, 0));
3013 3007 __ addq(rsp, 8);
3014 3008 __ ret(0);
3015 3009 }
3016 3010 {
3017 3011 StubCodeMark mark(this, "StubRoutines", "pow");
3018 3012 StubRoutines::_intrinsic_pow = (double (*)(double,double)) __ pc();
3019 3013
3020 3014 __ subq(rsp, 8);
3021 3015 __ movdbl(Address(rsp, 0), xmm1);
3022 3016 __ fld_d(Address(rsp, 0));
3023 3017 __ movdbl(Address(rsp, 0), xmm0);
3024 3018 __ fld_d(Address(rsp, 0));
3025 3019 __ pow_with_fallback(0);
3026 3020 __ fstp_d(Address(rsp, 0));
3027 3021 __ movdbl(xmm0, Address(rsp, 0));
3028 3022 __ addq(rsp, 8);
3029 3023 __ ret(0);
3030 3024 }
3031 3025 }
3032 3026
3033 3027 // AES intrinsic stubs
3034 3028 enum {AESBlockSize = 16};
3035 3029
3036 3030 address generate_key_shuffle_mask() {
3037 3031 __ align(16);
3038 3032 StubCodeMark mark(this, "StubRoutines", "key_shuffle_mask");
3039 3033 address start = __ pc();
3040 3034 __ emit_data64( 0x0405060700010203, relocInfo::none );
3041 3035 __ emit_data64( 0x0c0d0e0f08090a0b, relocInfo::none );
3042 3036 return start;
3043 3037 }
3044 3038
3045 3039 // Utility routine for loading a 128-bit key word in little-endian format;
3046 3040 // the caller can optionally indicate that the shuffle mask is already in an xmm register.
3047 3041 void load_key(XMMRegister xmmdst, Register key, int offset, XMMRegister xmm_shuf_mask=NULL) {
3048 3042 __ movdqu(xmmdst, Address(key, offset));
3049 3043 if (xmm_shuf_mask != NULL) {
3050 3044 __ pshufb(xmmdst, xmm_shuf_mask);
3051 3045 } else {
3052 3046 __ pshufb(xmmdst, ExternalAddress(StubRoutines::x86::key_shuffle_mask_addr()));
3053 3047 }
3054 3048 }
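
[Editorial note] A reading aid for load_key: the mask emitted by generate_key_shuffle_mask above (bytes 03 02 01 00 07 06 05 04 0b 0a 09 08 0f 0e 0d 0c in memory) makes pshufb reverse the bytes within each 32-bit word, converting the Java int[] key words into the byte order the AES instructions expect. A minimal C sketch of the same transform; load_key_c is a hypothetical name, not HotSpot API.

    #include <stdint.h>
    #include <string.h>

    static void load_key_c(uint8_t out[16], const uint8_t *key, int offset) {
      for (int i = 0; i < 4; i++) {
        uint32_t w;
        memcpy(&w, key + offset + 4 * i, 4);
        w = (w >> 24) | ((w >> 8) & 0x0000ff00u)    // byte-reverse one 32-bit
          | ((w << 8) & 0x00ff0000u) | (w << 24);   // word, as pshufb+mask does
        memcpy(out + 4 * i, &w, 4);
      }
    }
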
3055 3049
3056 3050 // Arguments:
3057 3051 //
3058 3052 // Inputs:
3059 3053 // c_rarg0 - source byte array address
3060 3054 // c_rarg1 - destination byte array address
3061 3055 // c_rarg2 - K (key) in little endian int array
3062 3056 //
3063 3057 address generate_aescrypt_encryptBlock() {
3064 3058 assert(UseAES, "need AES instructions and misaligned SSE support");
3065 3059 __ align(CodeEntryAlignment);
3066 3060 StubCodeMark mark(this, "StubRoutines", "aescrypt_encryptBlock");
3067 3061 Label L_doLast;
3068 3062 address start = __ pc();
3069 3063
3070 3064 const Register from = c_rarg0; // source array address
3071 3065 const Register to = c_rarg1; // destination array address
3072 3066 const Register key = c_rarg2; // key array address
3073 3067 const Register keylen = rax;
3074 3068
3075 3069 const XMMRegister xmm_result = xmm0;
3076 3070 const XMMRegister xmm_key_shuf_mask = xmm1;
3077 3071 // On win64 xmm6-xmm15 must be preserved so don't use them.
3078 3072 const XMMRegister xmm_temp1 = xmm2;
3079 3073 const XMMRegister xmm_temp2 = xmm3;
3080 3074 const XMMRegister xmm_temp3 = xmm4;
3081 3075 const XMMRegister xmm_temp4 = xmm5;
3082 3076
3083 3077 __ enter(); // required for proper stackwalking of RuntimeStub frame
3084 3078
3085 3079 // keylen could be only {11, 13, 15} * 4 = {44, 52, 60}
3086 3080 __ movl(keylen, Address(key, arrayOopDesc::length_offset_in_bytes() - arrayOopDesc::base_offset_in_bytes(T_INT)));
3087 3081
3088 3082 __ movdqu(xmm_key_shuf_mask, ExternalAddress(StubRoutines::x86::key_shuffle_mask_addr()));
3089 3083 __ movdqu(xmm_result, Address(from, 0)); // get 16 bytes of input
3090 3084
3091 3085 // For encryption, the java expanded key ordering is just what we need
3092 3086 // we don't know if the key is aligned, hence not using load-execute form
3093 3087
3094 3088 load_key(xmm_temp1, key, 0x00, xmm_key_shuf_mask);
3095 3089 __ pxor(xmm_result, xmm_temp1);
3096 3090
3097 3091 load_key(xmm_temp1, key, 0x10, xmm_key_shuf_mask);
3098 3092 load_key(xmm_temp2, key, 0x20, xmm_key_shuf_mask);
3099 3093 load_key(xmm_temp3, key, 0x30, xmm_key_shuf_mask);
3100 3094 load_key(xmm_temp4, key, 0x40, xmm_key_shuf_mask);
3101 3095
3102 3096 __ aesenc(xmm_result, xmm_temp1);
3103 3097 __ aesenc(xmm_result, xmm_temp2);
3104 3098 __ aesenc(xmm_result, xmm_temp3);
3105 3099 __ aesenc(xmm_result, xmm_temp4);
3106 3100
3107 3101 load_key(xmm_temp1, key, 0x50, xmm_key_shuf_mask);
3108 3102 load_key(xmm_temp2, key, 0x60, xmm_key_shuf_mask);
3109 3103 load_key(xmm_temp3, key, 0x70, xmm_key_shuf_mask);
3110 3104 load_key(xmm_temp4, key, 0x80, xmm_key_shuf_mask);
3111 3105
3112 3106 __ aesenc(xmm_result, xmm_temp1);
3113 3107 __ aesenc(xmm_result, xmm_temp2);
3114 3108 __ aesenc(xmm_result, xmm_temp3);
3115 3109 __ aesenc(xmm_result, xmm_temp4);
3116 3110
3117 3111 load_key(xmm_temp1, key, 0x90, xmm_key_shuf_mask);
3118 3112 load_key(xmm_temp2, key, 0xa0, xmm_key_shuf_mask);
3119 3113
3120 3114 __ cmpl(keylen, 44);
3121 3115 __ jccb(Assembler::equal, L_doLast);
3122 3116
3123 3117 __ aesenc(xmm_result, xmm_temp1);
3124 3118 __ aesenc(xmm_result, xmm_temp2);
3125 3119
3126 3120 load_key(xmm_temp1, key, 0xb0, xmm_key_shuf_mask);
3127 3121 load_key(xmm_temp2, key, 0xc0, xmm_key_shuf_mask);
3128 3122
3129 3123 __ cmpl(keylen, 52);
3130 3124 __ jccb(Assembler::equal, L_doLast);
3131 3125
3132 3126 __ aesenc(xmm_result, xmm_temp1);
3133 3127 __ aesenc(xmm_result, xmm_temp2);
3134 3128
3135 3129 load_key(xmm_temp1, key, 0xd0, xmm_key_shuf_mask);
3136 3130 load_key(xmm_temp2, key, 0xe0, xmm_key_shuf_mask);
3137 3131
3138 3132 __ BIND(L_doLast);
3139 3133 __ aesenc(xmm_result, xmm_temp1);
3140 3134 __ aesenclast(xmm_result, xmm_temp2);
3141 3135 __ movdqu(Address(to, 0), xmm_result); // store the result
3142 3136 __ xorptr(rax, rax); // return 0
3143 3137 __ leave(); // required for proper stackwalking of RuntimeStub frame
3144 3138 __ ret(0);
3145 3139
3146 3140 return start;
3147 3141 }
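
[Editorial note] For orientation: the keylen comparisons against 44 and 52 select 10, 12, or 14 rounds (AES-128/192/256). A hedged sketch of the same round structure using AES-NI compiler intrinsics; encrypt_block_c is a hypothetical helper, and rk[] is assumed to hold the round keys already in the byte order load_key produces.

    #include <wmmintrin.h>  // AES-NI intrinsics; compile with -maes

    static __m128i encrypt_block_c(__m128i in, const __m128i *rk, int keylen_ints) {
      int rounds = keylen_ints / 4 - 1;             // 44->10, 52->12, 60->14
      __m128i s = _mm_xor_si128(in, rk[0]);         // initial whitening (pxor above)
      for (int r = 1; r < rounds; r++) {
        s = _mm_aesenc_si128(s, rk[r]);             // middle rounds (aesenc)
      }
      return _mm_aesenclast_si128(s, rk[rounds]);   // final round (aesenclast)
    }
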
3148 3142
3149 3143
3150 3144 // Arguments:
3151 3145 //
3152 3146 // Inputs:
3153 3147 // c_rarg0 - source byte array address
3154 3148 // c_rarg1 - destination byte array address
3155 3149 // c_rarg2 - K (key) in little endian int array
3156 3150 //
3157 3151 address generate_aescrypt_decryptBlock() {
3158 3152 assert(UseAES, "need AES instructions and misaligned SSE support");
3159 3153 __ align(CodeEntryAlignment);
3160 3154 StubCodeMark mark(this, "StubRoutines", "aescrypt_decryptBlock");
3161 3155 Label L_doLast;
3162 3156 address start = __ pc();
3163 3157
3164 3158 const Register from = c_rarg0; // source array address
3165 3159 const Register to = c_rarg1; // destination array address
3166 3160 const Register key = c_rarg2; // key array address
3167 3161 const Register keylen = rax;
3168 3162
3169 3163 const XMMRegister xmm_result = xmm0;
3170 3164 const XMMRegister xmm_key_shuf_mask = xmm1;
3171 3165 // On win64 xmm6-xmm15 must be preserved so don't use them.
3172 3166 const XMMRegister xmm_temp1 = xmm2;
3173 3167 const XMMRegister xmm_temp2 = xmm3;
3174 3168 const XMMRegister xmm_temp3 = xmm4;
3175 3169 const XMMRegister xmm_temp4 = xmm5;
3176 3170
3177 3171 __ enter(); // required for proper stackwalking of RuntimeStub frame
3178 3172
3179 3173 // keylen could be only {11, 13, 15} * 4 = {44, 52, 60}
3180 3174 __ movl(keylen, Address(key, arrayOopDesc::length_offset_in_bytes() - arrayOopDesc::base_offset_in_bytes(T_INT)));
3181 3175
3182 3176 __ movdqu(xmm_key_shuf_mask, ExternalAddress(StubRoutines::x86::key_shuffle_mask_addr()));
3183 3177 __ movdqu(xmm_result, Address(from, 0));
3184 3178
3185 3179 // For decryption, the Java expanded key ordering is rotated one position from what we want,
3186 3180 // so we start from 0x10 here and hit 0x00 last.
3187 3181 // we don't know if the key is aligned, hence not using load-execute form
3188 3182 load_key(xmm_temp1, key, 0x10, xmm_key_shuf_mask);
3189 3183 load_key(xmm_temp2, key, 0x20, xmm_key_shuf_mask);
3190 3184 load_key(xmm_temp3, key, 0x30, xmm_key_shuf_mask);
3191 3185 load_key(xmm_temp4, key, 0x40, xmm_key_shuf_mask);
3192 3186
3193 3187 __ pxor (xmm_result, xmm_temp1);
3194 3188 __ aesdec(xmm_result, xmm_temp2);
3195 3189 __ aesdec(xmm_result, xmm_temp3);
3196 3190 __ aesdec(xmm_result, xmm_temp4);
3197 3191
3198 3192 load_key(xmm_temp1, key, 0x50, xmm_key_shuf_mask);
3199 3193 load_key(xmm_temp2, key, 0x60, xmm_key_shuf_mask);
3200 3194 load_key(xmm_temp3, key, 0x70, xmm_key_shuf_mask);
3201 3195 load_key(xmm_temp4, key, 0x80, xmm_key_shuf_mask);
3202 3196
3203 3197 __ aesdec(xmm_result, xmm_temp1);
3204 3198 __ aesdec(xmm_result, xmm_temp2);
3205 3199 __ aesdec(xmm_result, xmm_temp3);
3206 3200 __ aesdec(xmm_result, xmm_temp4);
3207 3201
3208 3202 load_key(xmm_temp1, key, 0x90, xmm_key_shuf_mask);
3209 3203 load_key(xmm_temp2, key, 0xa0, xmm_key_shuf_mask);
3210 3204 load_key(xmm_temp3, key, 0x00, xmm_key_shuf_mask);
3211 3205
3212 3206 __ cmpl(keylen, 44);
3213 3207 __ jccb(Assembler::equal, L_doLast);
3214 3208
3215 3209 __ aesdec(xmm_result, xmm_temp1);
3216 3210 __ aesdec(xmm_result, xmm_temp2);
3217 3211
3218 3212 load_key(xmm_temp1, key, 0xb0, xmm_key_shuf_mask);
3219 3213 load_key(xmm_temp2, key, 0xc0, xmm_key_shuf_mask);
3220 3214
3221 3215 __ cmpl(keylen, 52);
3222 3216 __ jccb(Assembler::equal, L_doLast);
3223 3217
3224 3218 __ aesdec(xmm_result, xmm_temp1);
3225 3219 __ aesdec(xmm_result, xmm_temp2);
3226 3220
3227 3221 load_key(xmm_temp1, key, 0xd0, xmm_key_shuf_mask);
3228 3222 load_key(xmm_temp2, key, 0xe0, xmm_key_shuf_mask);
3229 3223
3230 3224 __ BIND(L_doLast);
3231 3225 __ aesdec(xmm_result, xmm_temp1);
3232 3226 __ aesdec(xmm_result, xmm_temp2);
3233 3227
3234 3228 // for decryption the aesdeclast operation is always on key+0x00
3235 3229 __ aesdeclast(xmm_result, xmm_temp3);
3236 3230 __ movdqu(Address(to, 0), xmm_result); // store the result
3237 3231 __ xorptr(rax, rax); // return 0
3238 3232 __ leave(); // required for proper stackwalking of RuntimeStub frame
3239 3233 __ ret(0);
3240 3234
3241 3235 return start;
3242 3236 }
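
[Editorial note] The same structure for decryption, mirroring the rotated key indexing the comments describe: whitening uses the key at offset 0x10 (rk[1]) and aesdeclast always uses the key at 0x00 (rk[0]). decrypt_block_c is a hypothetical helper; rk[] is assumed to be the Java-expanded decryption key array in the layout this stub consumes (same <wmmintrin.h> assumptions as the encrypt sketch above).

    static __m128i decrypt_block_c(__m128i in, const __m128i *rk, int keylen_ints) {
      int nkeys = keylen_ints / 4;                  // 11, 13, or 15 round keys
      __m128i s = _mm_xor_si128(in, rk[1]);         // pxor with key+0x10 above
      for (int i = 2; i < nkeys; i++) {
        s = _mm_aesdec_si128(s, rk[i]);             // aesdec rounds, 0x20 onward
      }
      return _mm_aesdeclast_si128(s, rk[0]);        // aesdeclast on key+0x00
    }
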
3243 3237
3244 3238
3245 3239 // Arguments:
3246 3240 //
3247 3241 // Inputs:
3248 3242 // c_rarg0 - source byte array address
3249 3243 // c_rarg1 - destination byte array address
3250 3244 // c_rarg2 - K (key) in little endian int array
3251 3245 // c_rarg3 - r vector byte array address
3252 3246 // c_rarg4 - input length
3253 3247 //
3254 3248 address generate_cipherBlockChaining_encryptAESCrypt() {
3255 3249 assert(UseAES, "need AES instructions and misaligned SSE support");
3256 3250 __ align(CodeEntryAlignment);
3257 3251 StubCodeMark mark(this, "StubRoutines", "cipherBlockChaining_encryptAESCrypt");
3258 3252 address start = __ pc();
3259 3253
3260 3254 Label L_exit, L_key_192_256, L_key_256, L_loopTop_128, L_loopTop_192, L_loopTop_256;
3261 3255 const Register from = c_rarg0; // source array address
3262 3256 const Register to = c_rarg1; // destination array address
3263 3257 const Register key = c_rarg2; // key array address
3264 3258 const Register rvec = c_rarg3; // r byte array initialized from initvector array address
3265 3259 // and left with the results of the last encryption block
3266 3260 #ifndef _WIN64
3267 3261 const Register len_reg = c_rarg4; // src len (must be multiple of blocksize 16)
3268 3262 #else
3269 3263 const Address len_mem(rsp, 6 * wordSize); // length is on stack on Win64
3270 3264 const Register len_reg = r10; // pick the first volatile windows register
3271 3265 #endif
3272 3266 const Register pos = rax;
3273 3267
3274 3268 // xmm register assignments for the loops below
3275 3269 const XMMRegister xmm_result = xmm0;
3276 3270 const XMMRegister xmm_temp = xmm1;
3277 3271 // keys 0-10 preloaded into xmm2-xmm12
3278 3272 const int XMM_REG_NUM_KEY_FIRST = 2;
3279 3273 const int XMM_REG_NUM_KEY_LAST = 15;
3280 3274 const XMMRegister xmm_key0 = as_XMMRegister(XMM_REG_NUM_KEY_FIRST);
3281 3275 const XMMRegister xmm_key10 = as_XMMRegister(XMM_REG_NUM_KEY_FIRST+10);
3282 3276 const XMMRegister xmm_key11 = as_XMMRegister(XMM_REG_NUM_KEY_FIRST+11);
3283 3277 const XMMRegister xmm_key12 = as_XMMRegister(XMM_REG_NUM_KEY_FIRST+12);
3284 3278 const XMMRegister xmm_key13 = as_XMMRegister(XMM_REG_NUM_KEY_FIRST+13);
3285 3279
3286 3280 __ enter(); // required for proper stackwalking of RuntimeStub frame
3287 3281
3288 3282 #ifdef _WIN64
3289 3283 // on win64, fill len_reg from stack position
3290 3284 __ movl(len_reg, len_mem);
3291 3285 // save the xmm registers which must be preserved 6-15
3292 3286 __ subptr(rsp, -rsp_after_call_off * wordSize);
3293 3287 for (int i = 6; i <= XMM_REG_NUM_KEY_LAST; i++) {
3294 3288 __ movdqu(xmm_save(i), as_XMMRegister(i));
3295 3289 }
3296 3290 #endif
3297 3291
3298 3292 const XMMRegister xmm_key_shuf_mask = xmm_temp; // used temporarily to swap key bytes up front
3299 3293 __ movdqu(xmm_key_shuf_mask, ExternalAddress(StubRoutines::x86::key_shuffle_mask_addr()));
3300 3294 // load up xmm regs xmm2 thru xmm12 with key 0x00 - 0xa0
3301 3295 for (int rnum = XMM_REG_NUM_KEY_FIRST, offset = 0x00; rnum <= XMM_REG_NUM_KEY_FIRST+10; rnum++) {
3302 3296 load_key(as_XMMRegister(rnum), key, offset, xmm_key_shuf_mask);
3303 3297 offset += 0x10;
3304 3298 }
3305 3299 __ movdqu(xmm_result, Address(rvec, 0x00)); // initialize xmm_result with r vec
3306 3300
3307 3301 // now split to different paths depending on the keylen (len in ints of AESCrypt.KLE array: 44=128, 52=192, 60=256)
3308 3302 __ movl(rax, Address(key, arrayOopDesc::length_offset_in_bytes() - arrayOopDesc::base_offset_in_bytes(T_INT)));
3309 3303 __ cmpl(rax, 44);
3310 3304 __ jcc(Assembler::notEqual, L_key_192_256);
3311 3305
3312 3306 // 128 bit code follows here
3313 3307 __ movptr(pos, 0);
3314 3308 __ align(OptoLoopAlignment);
3315 3309
3316 3310 __ BIND(L_loopTop_128);
3317 3311 __ movdqu(xmm_temp, Address(from, pos, Address::times_1, 0)); // get next 16 bytes of input
3318 3312 __ pxor (xmm_result, xmm_temp); // xor with the current r vector
3319 3313 __ pxor (xmm_result, xmm_key0); // do the aes rounds
3320 3314 for (int rnum = XMM_REG_NUM_KEY_FIRST + 1; rnum <= XMM_REG_NUM_KEY_FIRST + 9; rnum++) {
3321 3315 __ aesenc(xmm_result, as_XMMRegister(rnum));
3322 3316 }
3323 3317 __ aesenclast(xmm_result, xmm_key10);
3324 3318 __ movdqu(Address(to, pos, Address::times_1, 0), xmm_result); // store into the next 16 bytes of output
3325 3319 // no need to store r to memory until we exit
3326 3320 __ addptr(pos, AESBlockSize);
3327 3321 __ subptr(len_reg, AESBlockSize);
3328 3322 __ jcc(Assembler::notEqual, L_loopTop_128);
3329 3323
3330 3324 __ BIND(L_exit);
3331 3325 __ movdqu(Address(rvec, 0), xmm_result); // final value of r stored in rvec of CipherBlockChaining object
3332 3326
3333 3327 #ifdef _WIN64
3334 3328 // restore xmm regs belonging to calling function
3335 3329 for (int i = 6; i <= XMM_REG_NUM_KEY_LAST; i++) {
3336 3330 __ movdqu(as_XMMRegister(i), xmm_save(i));
3337 3331 }
3338 3332 #endif
3339 3333 __ movl(rax, 0); // return 0 (why?)
3340 3334 __ leave(); // required for proper stackwalking of RuntimeStub frame
3341 3335 __ ret(0);
3342 3336
3343 3337 __ BIND(L_key_192_256);
3344 3338 // here rax = len in ints of AESCrypt.KLE array (52=192, or 60=256)
3345 3339 load_key(xmm_key11, key, 0xb0, xmm_key_shuf_mask);
3346 3340 load_key(xmm_key12, key, 0xc0, xmm_key_shuf_mask);
3347 3341 __ cmpl(rax, 52);
3348 3342 __ jcc(Assembler::notEqual, L_key_256);
3349 3343
3350 3344 // 192-bit code follows here (could be changed to use more xmm registers)
3351 3345 __ movptr(pos, 0);
3352 3346 __ align(OptoLoopAlignment);
3353 3347
3354 3348 __ BIND(L_loopTop_192);
3355 3349 __ movdqu(xmm_temp, Address(from, pos, Address::times_1, 0)); // get next 16 bytes of input
3356 3350 __ pxor (xmm_result, xmm_temp); // xor with the current r vector
3357 3351 __ pxor (xmm_result, xmm_key0); // do the aes rounds
3358 3352 for (int rnum = XMM_REG_NUM_KEY_FIRST + 1; rnum <= XMM_REG_NUM_KEY_FIRST + 11; rnum++) {
3359 3353 __ aesenc(xmm_result, as_XMMRegister(rnum));
3360 3354 }
3361 3355 __ aesenclast(xmm_result, xmm_key12);
3362 3356 __ movdqu(Address(to, pos, Address::times_1, 0), xmm_result); // store into the next 16 bytes of output
3363 3357 // no need to store r to memory until we exit
3364 3358 __ addptr(pos, AESBlockSize);
3365 3359 __ subptr(len_reg, AESBlockSize);
3366 3360 __ jcc(Assembler::notEqual, L_loopTop_192);
3367 3361 __ jmp(L_exit);
3368 3362
3369 3363 __ BIND(L_key_256);
3370 3364 // 256-bit code follows here (could be changed to use more xmm registers)
3371 3365 load_key(xmm_key13, key, 0xd0, xmm_key_shuf_mask);
3372 3366 __ movptr(pos, 0);
3373 3367 __ align(OptoLoopAlignment);
3374 3368
3375 3369 __ BIND(L_loopTop_256);
3376 3370 __ movdqu(xmm_temp, Address(from, pos, Address::times_1, 0)); // get next 16 bytes of input
3377 3371 __ pxor (xmm_result, xmm_temp); // xor with the current r vector
3378 3372 __ pxor (xmm_result, xmm_key0); // do the aes rounds
3379 3373 for (int rnum = XMM_REG_NUM_KEY_FIRST + 1; rnum <= XMM_REG_NUM_KEY_FIRST + 13; rnum++) {
3380 3374 __ aesenc(xmm_result, as_XMMRegister(rnum));
3381 3375 }
3382 3376 load_key(xmm_temp, key, 0xe0);
3383 3377 __ aesenclast(xmm_result, xmm_temp);
3384 3378 __ movdqu(Address(to, pos, Address::times_1, 0), xmm_result); // store into the next 16 bytes of output
3385 3379 // no need to store r to memory until we exit
3386 3380 __ addptr(pos, AESBlockSize);
3387 3381 __ subptr(len_reg, AESBlockSize);
3388 3382 __ jcc(Assembler::notEqual, L_loopTop_256);
3389 3383 __ jmp(L_exit);
3390 3384
3391 3385 return start;
3392 3386 }
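
[Editorial note] A hedged sketch of the CBC dataflow the three loops above share, reusing the hypothetical encrypt_block_c from the earlier sketch: the running r vector stays in a register across iterations (hence "no need to store r to memory until we exit"), and each ciphertext block feeds the next block's XOR, which is why this direction cannot be parallelized.

    #include <emmintrin.h>  // _mm_loadu_si128 / _mm_storeu_si128

    static void cbc_encrypt_c(uint8_t *to, const uint8_t *from, size_t len,
                              const __m128i *rk, int keylen_ints, uint8_t rvec[16]) {
      __m128i r = _mm_loadu_si128((const __m128i *)rvec);   // initial r vector
      for (size_t pos = 0; pos < len; pos += 16) {          // AESBlockSize = 16
        __m128i p = _mm_loadu_si128((const __m128i *)(from + pos));
        r = encrypt_block_c(_mm_xor_si128(r, p), rk, keylen_ints); // serial chain
        _mm_storeu_si128((__m128i *)(to + pos), r);         // r stays in a register
      }
      _mm_storeu_si128((__m128i *)rvec, r);   // final r written back, as at L_exit
    }
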
3393 3387
3394 3388
3395 3389
3396 3390 // This is a version of CBC/AES Decrypt which does 4 blocks in a loop at a time
3397 3391 // to hide instruction latency
3398 3392 //
3399 3393 // Arguments:
3400 3394 //
3401 3395 // Inputs:
3402 3396 // c_rarg0 - source byte array address
3403 3397 // c_rarg1 - destination byte array address
3404 3398 // c_rarg2 - K (key) in little endian int array
3405 3399 // c_rarg3 - r vector byte array address
3406 3400 // c_rarg4 - input length
3407 3401 //
3408 3402
3409 3403 address generate_cipherBlockChaining_decryptAESCrypt_Parallel() {
3410 3404 assert(UseAES, "need AES instructions and misaligned SSE support");
3411 3405 __ align(CodeEntryAlignment);
3412 3406 StubCodeMark mark(this, "StubRoutines", "cipherBlockChaining_decryptAESCrypt");
3413 3407 address start = __ pc();
3414 3408
3415 3409 Label L_exit, L_key_192_256, L_key_256;
3416 3410 Label L_singleBlock_loopTop_128, L_multiBlock_loopTop_128;
3417 3411 Label L_singleBlock_loopTop_192, L_singleBlock_loopTop_256;
3418 3412 const Register from = c_rarg0; // source array address
3419 3413 const Register to = c_rarg1; // destination array address
3420 3414 const Register key = c_rarg2; // key array address
3421 3415 const Register rvec = c_rarg3; // r byte array initialized from initvector array address
3422 3416 // and left with the results of the last encryption block
3423 3417 #ifndef _WIN64
3424 3418 const Register len_reg = c_rarg4; // src len (must be multiple of blocksize 16)
3425 3419 #else
3426 3420 const Address len_mem(rsp, 6 * wordSize); // length is on stack on Win64
3427 3421 const Register len_reg = r10; // pick the first volatile windows register
3428 3422 #endif
3429 3423 const Register pos = rax;
3430 3424
3431 3425 // the 11 round keys are preloaded into xmm5-xmm15
3432 3426 const int XMM_REG_NUM_KEY_FIRST = 5;
3433 3427 const int XMM_REG_NUM_KEY_LAST = 15;
3434 3428 const XMMRegister xmm_key_first = as_XMMRegister(XMM_REG_NUM_KEY_FIRST);
3435 3429 const XMMRegister xmm_key_last = as_XMMRegister(XMM_REG_NUM_KEY_LAST);
3436 3430
3437 3431 __ enter(); // required for proper stackwalking of RuntimeStub frame
3438 3432
3439 3433 #ifdef _WIN64
3440 3434 // on win64, fill len_reg from stack position
3441 3435 __ movl(len_reg, len_mem);
3442 3436 // save the xmm registers which must be preserved 6-15
3443 3437 __ subptr(rsp, -rsp_after_call_off * wordSize);
3444 3438 for (int i = 6; i <= XMM_REG_NUM_KEY_LAST; i++) {
3445 3439 __ movdqu(xmm_save(i), as_XMMRegister(i));
3446 3440 }
3447 3441 #endif
3448 3442 // The Java expanded key ordering is rotated one position from what we want,
3449 3443 // so we start from 0x10 here and hit 0x00 last.
3450 3444 const XMMRegister xmm_key_shuf_mask = xmm1; // used temporarily to swap key bytes up front
3451 3445 __ movdqu(xmm_key_shuf_mask, ExternalAddress(StubRoutines::x86::key_shuffle_mask_addr()));
3452 3446 // load up xmm regs 5 thru 15 with key 0x10 - 0xa0 - 0x00
3453 3447 for (int rnum = XMM_REG_NUM_KEY_FIRST, offset = 0x10; rnum < XMM_REG_NUM_KEY_LAST; rnum++) {
3454 3448 load_key(as_XMMRegister(rnum), key, offset, xmm_key_shuf_mask);
3455 3449 offset += 0x10;
3456 3450 }
3457 3451 load_key(xmm_key_last, key, 0x00, xmm_key_shuf_mask);
3458 3452
3459 3453 const XMMRegister xmm_prev_block_cipher = xmm1; // holds cipher of previous block
3460 3454
3461 3455 // registers holding the four results in the parallelized loop
3462 3456 const XMMRegister xmm_result0 = xmm0;
3463 3457 const XMMRegister xmm_result1 = xmm2;
3464 3458 const XMMRegister xmm_result2 = xmm3;
3465 3459 const XMMRegister xmm_result3 = xmm4;
3466 3460
3467 3461 __ movdqu(xmm_prev_block_cipher, Address(rvec, 0x00)); // initialize with initial rvec
3468 3462
3469 3463 // now split to different paths depending on the keylen (len in ints of AESCrypt.KLE array: 44=128, 52=192, 60=256)
3470 3464 __ movl(rax, Address(key, arrayOopDesc::length_offset_in_bytes() - arrayOopDesc::base_offset_in_bytes(T_INT)));
3471 3465 __ cmpl(rax, 44);
3472 3466 __ jcc(Assembler::notEqual, L_key_192_256);
3473 3467
3474 3468
3475 3469 // 128-bit code follows here, parallelized
3476 3470 __ movptr(pos, 0);
3477 3471 __ align(OptoLoopAlignment);
3478 3472 __ BIND(L_multiBlock_loopTop_128);
3479 3473 __ cmpptr(len_reg, 4*AESBlockSize); // see if at least 4 blocks left
3480 3474 __ jcc(Assembler::less, L_singleBlock_loopTop_128);
3481 3475
3482 3476 __ movdqu(xmm_result0, Address(from, pos, Address::times_1, 0*AESBlockSize)); // get next 4 blocks into xmmresult registers
3483 3477 __ movdqu(xmm_result1, Address(from, pos, Address::times_1, 1*AESBlockSize));
3484 3478 __ movdqu(xmm_result2, Address(from, pos, Address::times_1, 2*AESBlockSize));
3485 3479 __ movdqu(xmm_result3, Address(from, pos, Address::times_1, 3*AESBlockSize));
3486 3480
3487 3481 #define DoFour(opc, src_reg) \
3488 3482 __ opc(xmm_result0, src_reg); \
3489 3483 __ opc(xmm_result1, src_reg); \
3490 3484 __ opc(xmm_result2, src_reg); \
3491 3485 __ opc(xmm_result3, src_reg);
3492 3486
3493 3487 DoFour(pxor, xmm_key_first);
3494 3488 for (int rnum = XMM_REG_NUM_KEY_FIRST + 1; rnum <= XMM_REG_NUM_KEY_LAST - 1; rnum++) {
3495 3489 DoFour(aesdec, as_XMMRegister(rnum));
3496 3490 }
3497 3491 DoFour(aesdeclast, xmm_key_last);
3498 3492 // for each result, xor with the r vector of previous cipher block
3499 3493 __ pxor(xmm_result0, xmm_prev_block_cipher);
3500 3494 __ movdqu(xmm_prev_block_cipher, Address(from, pos, Address::times_1, 0*AESBlockSize));
3501 3495 __ pxor(xmm_result1, xmm_prev_block_cipher);
3502 3496 __ movdqu(xmm_prev_block_cipher, Address(from, pos, Address::times_1, 1*AESBlockSize));
3503 3497 __ pxor(xmm_result2, xmm_prev_block_cipher);
3504 3498 __ movdqu(xmm_prev_block_cipher, Address(from, pos, Address::times_1, 2*AESBlockSize));
3505 3499 __ pxor(xmm_result3, xmm_prev_block_cipher);
3506 3500 __ movdqu(xmm_prev_block_cipher, Address(from, pos, Address::times_1, 3*AESBlockSize)); // this will carry over to next set of blocks
3507 3501
3508 3502 __ movdqu(Address(to, pos, Address::times_1, 0*AESBlockSize), xmm_result0); // store 4 results into the next 64 bytes of output
3509 3503 __ movdqu(Address(to, pos, Address::times_1, 1*AESBlockSize), xmm_result1);
3510 3504 __ movdqu(Address(to, pos, Address::times_1, 2*AESBlockSize), xmm_result2);
3511 3505 __ movdqu(Address(to, pos, Address::times_1, 3*AESBlockSize), xmm_result3);
3512 3506
3513 3507 __ addptr(pos, 4*AESBlockSize);
3514 3508 __ subptr(len_reg, 4*AESBlockSize);
3515 3509 __ jmp(L_multiBlock_loopTop_128);
3516 3510
3517 3511 // registers used in the non-parallelized loops
3518 3512 // xmm register assignments for the loops below
3519 3513 const XMMRegister xmm_result = xmm0;
3520 3514 const XMMRegister xmm_prev_block_cipher_save = xmm2;
3521 3515 const XMMRegister xmm_key11 = xmm3;
3522 3516 const XMMRegister xmm_key12 = xmm4;
3523 3517 const XMMRegister xmm_temp = xmm4;
3524 3518
3525 3519 __ align(OptoLoopAlignment);
3526 3520 __ BIND(L_singleBlock_loopTop_128);
3527 3521 __ cmpptr(len_reg, 0); // any blocks left?
3528 3522 __ jcc(Assembler::equal, L_exit);
3529 3523 __ movdqu(xmm_result, Address(from, pos, Address::times_1, 0)); // get next 16 bytes of cipher input
3530 3524 __ movdqa(xmm_prev_block_cipher_save, xmm_result); // save for next r vector
3531 3525 __ pxor (xmm_result, xmm_key_first); // do the aes dec rounds
3532 3526 for (int rnum = XMM_REG_NUM_KEY_FIRST + 1; rnum <= XMM_REG_NUM_KEY_LAST - 1; rnum++) {
3533 3527 __ aesdec(xmm_result, as_XMMRegister(rnum));
3534 3528 }
3535 3529 __ aesdeclast(xmm_result, xmm_key_last);
3536 3530 __ pxor (xmm_result, xmm_prev_block_cipher); // xor with the current r vector
3537 3531 __ movdqu(Address(to, pos, Address::times_1, 0), xmm_result); // store into the next 16 bytes of output
3538 3532 // no need to store r to memory until we exit
3539 3533 __ movdqa(xmm_prev_block_cipher, xmm_prev_block_cipher_save); // set up next r vector with cipher input from this block
3540 3534
3541 3535 __ addptr(pos, AESBlockSize);
3542 3536 __ subptr(len_reg, AESBlockSize);
3543 3537 __ jmp(L_singleBlock_loopTop_128);
3544 3538
3545 3539
3546 3540 __ BIND(L_exit);
3547 3541 __ movdqu(Address(rvec, 0), xmm_prev_block_cipher); // final value of r stored in rvec of CipherBlockChaining object
3548 3542 #ifdef _WIN64
3549 3543 // restore regs belonging to calling function
3550 3544 for (int i = 6; i <= XMM_REG_NUM_KEY_LAST; i++) {
3551 3545 __ movdqu(as_XMMRegister(i), xmm_save(i));
3552 3546 }
3553 3547 #endif
3554 3548 __ movl(rax, 0); // return 0 (why?)
3555 3549 __ leave(); // required for proper stackwalking of RuntimeStub frame
3556 3550 __ ret(0);
3557 3551
3558 3552
3559 3553 __ BIND(L_key_192_256);
3560 3554 // here rax = len in ints of AESCrypt.KLE array (52=192, or 60=256)
3561 3555 load_key(xmm_key11, key, 0xb0);
3562 3556 __ cmpl(rax, 52);
3563 3557 __ jcc(Assembler::notEqual, L_key_256);
3564 3558
3565 3559 // 192-bit code follows here (could be optimized to use parallelism)
3566 3560 load_key(xmm_key12, key, 0xc0); // 192-bit key goes up to c0
3567 3561 __ movptr(pos, 0);
3568 3562 __ align(OptoLoopAlignment);
3569 3563
3570 3564 __ BIND(L_singleBlock_loopTop_192);
3571 3565 __ movdqu(xmm_result, Address(from, pos, Address::times_1, 0)); // get next 16 bytes of cipher input
3572 3566 __ movdqa(xmm_prev_block_cipher_save, xmm_result); // save for next r vector
3573 3567 __ pxor (xmm_result, xmm_key_first); // do the aes dec rounds
3574 3568 for (int rnum = XMM_REG_NUM_KEY_FIRST + 1; rnum <= XMM_REG_NUM_KEY_LAST - 1; rnum++) {
3575 3569 __ aesdec(xmm_result, as_XMMRegister(rnum));
3576 3570 }
3577 3571 __ aesdec(xmm_result, xmm_key11);
3578 3572 __ aesdec(xmm_result, xmm_key12);
3579 3573 __ aesdeclast(xmm_result, xmm_key_last); // xmm15 always came from key+0
3580 3574 __ pxor (xmm_result, xmm_prev_block_cipher); // xor with the current r vector
3581 3575 __ movdqu(Address(to, pos, Address::times_1, 0), xmm_result); // store into the next 16 bytes of output
3582 3576 // no need to store r to memory until we exit
3583 3577 __ movdqa(xmm_prev_block_cipher, xmm_prev_block_cipher_save); // set up next r vector with cipher input from this block
3584 3578 __ addptr(pos, AESBlockSize);
3585 3579 __ subptr(len_reg, AESBlockSize);
3586 3580 __ jcc(Assembler::notEqual,L_singleBlock_loopTop_192);
3587 3581 __ jmp(L_exit);
3588 3582
3589 3583 __ BIND(L_key_256);
3590 3584 // 256-bit code follows here (could be optimized to use parallelism)
3591 3585 __ movptr(pos, 0);
3592 3586 __ align(OptoLoopAlignment);
3593 3587
3594 3588 __ BIND(L_singleBlock_loopTop_256);
3595 3589 __ movdqu(xmm_result, Address(from, pos, Address::times_1, 0)); // get next 16 bytes of cipher input
3596 3590 __ movdqa(xmm_prev_block_cipher_save, xmm_result); // save for next r vector
3597 3591 __ pxor (xmm_result, xmm_key_first); // do the aes dec rounds
3598 3592 for (int rnum = XMM_REG_NUM_KEY_FIRST + 1; rnum <= XMM_REG_NUM_KEY_LAST - 1; rnum++) {
3599 3593 __ aesdec(xmm_result, as_XMMRegister(rnum));
3600 3594 }
3601 3595 __ aesdec(xmm_result, xmm_key11);
3602 3596 load_key(xmm_temp, key, 0xc0);
3603 3597 __ aesdec(xmm_result, xmm_temp);
3604 3598 load_key(xmm_temp, key, 0xd0);
3605 3599 __ aesdec(xmm_result, xmm_temp);
3606 3600 load_key(xmm_temp, key, 0xe0); // 256-bit key goes up to e0
3607 3601 __ aesdec(xmm_result, xmm_temp);
3608 3602 __ aesdeclast(xmm_result, xmm_key_last); // xmm15 came from key+0
3609 3603 __ pxor (xmm_result, xmm_prev_block_cipher); // xor with the current r vector
3610 3604 __ movdqu(Address(to, pos, Address::times_1, 0), xmm_result); // store into the next 16 bytes of output
3611 3605 // no need to store r to memory until we exit
3612 3606 __ movdqa(xmm_prev_block_cipher, xmm_prev_block_cipher_save); // set up next r vector with cipher input from this block
3613 3607 __ addptr(pos, AESBlockSize);
3614 3608 __ subptr(len_reg, AESBlockSize);
3615 3609 __ jcc(Assembler::notEqual,L_singleBlock_loopTop_256);
3616 3610 __ jmp(L_exit);
3617 3611
3618 3612 return start;
3619 3613 }
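
[Editorial note] Why the 4-block trick works, as a hedged sketch reusing the hypothetical decrypt_block_c from above: in CBC decryption P[i] = D(C[i]) xor C[i-1], and the four D(C[i]) computations are independent, so interleaving them keeps several aesdec chains in flight and hides instruction latency. The single-block tail mirrors L_singleBlock_loopTop.

    static void cbc_decrypt4_c(uint8_t *to, const uint8_t *from, size_t len,
                               const __m128i *rk, int keylen_ints, uint8_t rvec[16]) {
      __m128i prev = _mm_loadu_si128((const __m128i *)rvec);
      size_t pos = 0;
      for (; pos + 64 <= len; pos += 64) {          // 4 * AESBlockSize at a time
        __m128i c0 = _mm_loadu_si128((const __m128i *)(from + pos));
        __m128i c1 = _mm_loadu_si128((const __m128i *)(from + pos + 16));
        __m128i c2 = _mm_loadu_si128((const __m128i *)(from + pos + 32));
        __m128i c3 = _mm_loadu_si128((const __m128i *)(from + pos + 48));
        __m128i r0 = decrypt_block_c(c0, rk, keylen_ints);  // four independent
        __m128i r1 = decrypt_block_c(c1, rk, keylen_ints);  // decrypt chains
        __m128i r2 = decrypt_block_c(c2, rk, keylen_ints);
        __m128i r3 = decrypt_block_c(c3, rk, keylen_ints);
        _mm_storeu_si128((__m128i *)(to + pos),      _mm_xor_si128(r0, prev));
        _mm_storeu_si128((__m128i *)(to + pos + 16), _mm_xor_si128(r1, c0));
        _mm_storeu_si128((__m128i *)(to + pos + 32), _mm_xor_si128(r2, c1));
        _mm_storeu_si128((__m128i *)(to + pos + 48), _mm_xor_si128(r3, c2));
        prev = c3;                                  // carries to the next set of blocks
      }
      for (; pos < len; pos += 16) {                // single-block tail loop
        __m128i c = _mm_loadu_si128((const __m128i *)(from + pos));
        _mm_storeu_si128((__m128i *)(to + pos),
                         _mm_xor_si128(decrypt_block_c(c, rk, keylen_ints), prev));
        prev = c;
      }
      _mm_storeu_si128((__m128i *)rvec, prev);      // final r back to rvec
    }
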
3620 3614
3621 3615
3622 3616
3623 3617 #undef __
3624 3618 #define __ masm->
3625 3619
3626 3620 // Continuation point for throwing of implicit exceptions that are
3627 3621 // not handled in the current activation. Fabricates an exception
3628 3622 // oop and initiates normal exception dispatching in this
3629 3623 // frame. Since we need to preserve callee-saved values (currently
3630 3624 // only for C2, but done for C1 as well) we need a callee-saved oop
3631 3625 // map and therefore have to make these stubs into RuntimeStubs
3632 3626 // rather than BufferBlobs. If the compiler needs all registers to
3633 3627 // be preserved between the fault point and the exception handler
3634 3628 // then it must assume responsibility for that in
3635 3629 // AbstractCompiler::continuation_for_implicit_null_exception or
3636 3630 // continuation_for_implicit_division_by_zero_exception. All other
3637 3631 // implicit exceptions (e.g., NullPointerException or
3638 3632 // AbstractMethodError on entry) are either at call sites or
3639 3633 // otherwise assume that stack unwinding will be initiated, so
3640 3634 // caller saved registers were assumed volatile in the compiler.
3641 3635 address generate_throw_exception(const char* name,
3642 3636 address runtime_entry,
3643 3637 Register arg1 = noreg,
3644 3638 Register arg2 = noreg) {
3645 3639 // Information about frame layout at time of blocking runtime call.
3646 3640 // Note that we only have to preserve callee-saved registers since
3647 3641 // the compilers are responsible for supplying a continuation point
3648 3642 // if they expect all registers to be preserved.
3649 3643 enum layout {
3650 3644 rbp_off = frame::arg_reg_save_area_bytes/BytesPerInt,
3651 3645 rbp_off2,
3652 3646 return_off,
3653 3647 return_off2,
3654 3648 framesize // inclusive of return address
3655 3649 };
3656 3650
3657 3651 int insts_size = 512;
3658 3652 int locs_size = 64;
3659 3653
3660 3654 CodeBuffer code(name, insts_size, locs_size);
3661 3655 OopMapSet* oop_maps = new OopMapSet();
3662 3656 MacroAssembler* masm = new MacroAssembler(&code);
3663 3657
3664 3658 address start = __ pc();
3665 3659
3666 3660 // This is an inlined and slightly modified version of call_VM
3667 3661 // which has the ability to fetch the return PC out of
3668 3662 // thread-local storage and also sets up last_Java_sp slightly
3669 3663 // differently than the real call_VM
3670 3664
3671 3665 __ enter(); // required for proper stackwalking of RuntimeStub frame
3672 3666
3673 3667 assert(is_even(framesize/2), "sp not 16-byte aligned");
3674 3668
3675 3669 // return address and rbp are already in place
3676 3670 __ subptr(rsp, (framesize-4) << LogBytesPerInt); // prolog
3677 3671
3678 3672 int frame_complete = __ pc() - start;
3679 3673
3680 3674 // Set up last_Java_sp and last_Java_fp
3681 3675 address the_pc = __ pc();
3682 3676 __ set_last_Java_frame(rsp, rbp, the_pc);
3683 3677 __ andptr(rsp, -(StackAlignmentInBytes)); // Align stack
3684 3678
3685 3679 // Call runtime
3686 3680 if (arg1 != noreg) {
3687 3681 assert(arg2 != c_rarg1, "clobbered");
3688 3682 __ movptr(c_rarg1, arg1);
3689 3683 }
3690 3684 if (arg2 != noreg) {
3691 3685 __ movptr(c_rarg2, arg2);
3692 3686 }
3693 3687 __ movptr(c_rarg0, r15_thread);
3694 3688 BLOCK_COMMENT("call runtime_entry");
3695 3689 __ call(RuntimeAddress(runtime_entry));
3696 3690
3697 3691 // Generate oop map
3698 3692 OopMap* map = new OopMap(framesize, 0);
3699 3693
3700 3694 oop_maps->add_gc_map(the_pc - start, map);
3701 3695
3702 3696 __ reset_last_Java_frame(true, true);
3703 3697
3704 3698 __ leave(); // required for proper stackwalking of RuntimeStub frame
3705 3699
3706 3700 // check for pending exceptions
3707 3701 #ifdef ASSERT
3708 3702 Label L;
3709 3703 __ cmpptr(Address(r15_thread, Thread::pending_exception_offset()),
3710 3704 (int32_t) NULL_WORD);
3711 3705 __ jcc(Assembler::notEqual, L);
3712 3706 __ should_not_reach_here();
3713 3707 __ bind(L);
3714 3708 #endif // ASSERT
3715 3709 __ jump(RuntimeAddress(StubRoutines::forward_exception_entry()));
3716 3710
3717 3711
3718 3712 // codeBlob framesize is in words (not VMRegImpl::slot_size)
3719 3713 RuntimeStub* stub =
3720 3714 RuntimeStub::new_runtime_stub(name,
3721 3715 &code,
3722 3716 frame_complete,
3723 3717 (framesize >> (LogBytesPerWord - LogBytesPerInt)),
3724 3718 oop_maps, false);
3725 3719 return stub->entry_point();
3726 3720 }
3727 3721
3728 3722 // Initialization
3729 3723 void generate_initial() {
3730 3724 // Generates all stubs and initializes the entry points
3731 3725
3732 3726 // This platform-specific stub is needed by generate_call_stub()
3733 3727 StubRoutines::x86::_mxcsr_std = generate_fp_mask("mxcsr_std", 0x0000000000001F80);
3734 3728
3735 3729 // entry points that exist in all platforms. Note: This is code
3736 3730 // that could be shared among different platforms - however the
3737 3731 // benefit seems to be smaller than the disadvantage of having a
3738 3732 // much more complicated generator structure. See also comment in
3739 3733 // stubRoutines.hpp.
3740 3734
3741 3735 StubRoutines::_forward_exception_entry = generate_forward_exception();
3742 3736
3743 3737 StubRoutines::_call_stub_entry =
3744 3738 generate_call_stub(StubRoutines::_call_stub_return_address);
3745 3739
3746 3740 // is referenced by megamorphic call
3747 3741 StubRoutines::_catch_exception_entry = generate_catch_exception();
3748 3742
3749 3743 // atomic calls
3750 3744 StubRoutines::_atomic_xchg_entry = generate_atomic_xchg();
3751 3745 StubRoutines::_atomic_xchg_ptr_entry = generate_atomic_xchg_ptr();
3752 3746 StubRoutines::_atomic_cmpxchg_entry = generate_atomic_cmpxchg();
3753 3747 StubRoutines::_atomic_cmpxchg_long_entry = generate_atomic_cmpxchg_long();
3754 3748 StubRoutines::_atomic_add_entry = generate_atomic_add();
3755 3749 StubRoutines::_atomic_add_ptr_entry = generate_atomic_add_ptr();
3756 3750 StubRoutines::_fence_entry = generate_orderaccess_fence();
3757 3751
3758 3752 StubRoutines::_handler_for_unsafe_access_entry =
3759 3753 generate_handler_for_unsafe_access();
3760 3754
3761 3755 // platform dependent
3762 3756 StubRoutines::x86::_get_previous_fp_entry = generate_get_previous_fp();
3763 3757 StubRoutines::x86::_get_previous_sp_entry = generate_get_previous_sp();
3764 3758
3765 3759 StubRoutines::x86::_verify_mxcsr_entry = generate_verify_mxcsr();
3766 3760
3767 3761 // Build this early so it's available for the interpreter.
3768 3762 StubRoutines::_throw_StackOverflowError_entry =
3769 3763 generate_throw_exception("StackOverflowError throw_exception",
3770 3764 CAST_FROM_FN_PTR(address,
3771 3765 SharedRuntime::
3772 3766 throw_StackOverflowError));
3773 3767 }
3774 3768
3775 3769 void generate_all() {
3776 3770 // Generates all stubs and initializes the entry points
3777 3771
3778 3772 // These entry points require SharedInfo::stack0 to be set up in
3779 3773 // non-core builds and need to be relocatable, so they each
3780 3774 // fabricate a RuntimeStub internally.
3781 3775 StubRoutines::_throw_AbstractMethodError_entry =
3782 3776 generate_throw_exception("AbstractMethodError throw_exception",
3783 3777 CAST_FROM_FN_PTR(address,
3784 3778 SharedRuntime::
3785 3779 throw_AbstractMethodError));
3786 3780
3787 3781 StubRoutines::_throw_IncompatibleClassChangeError_entry =
3788 3782 generate_throw_exception("IncompatibleClassChangeError throw_exception",
3789 3783 CAST_FROM_FN_PTR(address,
3790 3784 SharedRuntime::
3791 3785 throw_IncompatibleClassChangeError));
3792 3786
3793 3787 StubRoutines::_throw_NullPointerException_at_call_entry =
3794 3788 generate_throw_exception("NullPointerException at call throw_exception",
3795 3789 CAST_FROM_FN_PTR(address,
3796 3790 SharedRuntime::
3797 3791 throw_NullPointerException_at_call));
3798 3792
3799 3793 // entry points that are platform specific
3800 3794 StubRoutines::x86::_f2i_fixup = generate_f2i_fixup();
3801 3795 StubRoutines::x86::_f2l_fixup = generate_f2l_fixup();
3802 3796 StubRoutines::x86::_d2i_fixup = generate_d2i_fixup();
3803 3797 StubRoutines::x86::_d2l_fixup = generate_d2l_fixup();
3804 3798
3805 3799 StubRoutines::x86::_float_sign_mask = generate_fp_mask("float_sign_mask", 0x7FFFFFFF7FFFFFFF);
3806 3800 StubRoutines::x86::_float_sign_flip = generate_fp_mask("float_sign_flip", 0x8000000080000000);
3807 3801 StubRoutines::x86::_double_sign_mask = generate_fp_mask("double_sign_mask", 0x7FFFFFFFFFFFFFFF);
3808 3802 StubRoutines::x86::_double_sign_flip = generate_fp_mask("double_sign_flip", 0x8000000000000000);
3809 3803
3810 3804 // support for verify_oop (must happen after universe_init)
3811 3805 StubRoutines::_verify_oop_subroutine_entry = generate_verify_oop();
3812 3806
3813 3807 // arraycopy stubs used by compilers
3814 3808 generate_arraycopy_stubs();
3815 3809
3816 3810 generate_math_stubs();
3817 3811
3818 3812 // don't bother generating these AES intrinsic stubs unless global flag is set
3819 3813 if (UseAESIntrinsics) {
3820 3814 StubRoutines::x86::_key_shuffle_mask_addr = generate_key_shuffle_mask(); // needed by the others
3821 3815
3822 3816 StubRoutines::_aescrypt_encryptBlock = generate_aescrypt_encryptBlock();
3823 3817 StubRoutines::_aescrypt_decryptBlock = generate_aescrypt_decryptBlock();
3824 3818 StubRoutines::_cipherBlockChaining_encryptAESCrypt = generate_cipherBlockChaining_encryptAESCrypt();
3825 3819 StubRoutines::_cipherBlockChaining_decryptAESCrypt = generate_cipherBlockChaining_decryptAESCrypt_Parallel();
3826 3820 }
3827 3821 }
3828 3822
3829 3823 public:
3830 3824 StubGenerator(CodeBuffer* code, bool all) : StubCodeGenerator(code) {
3831 3825 if (all) {
3832 3826 generate_all();
3833 3827 } else {
3834 3828 generate_initial();
3835 3829 }
3836 3830 }
3837 3831 }; // end class declaration
3838 3832
3839 3833 void StubGenerator_generate(CodeBuffer* code, bool all) {
3840 3834 StubGenerator g(code, all);
3841 3835 }