9 *
10 * This code is distributed in the hope that it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
13 * version 2 for more details (a copy is included in the LICENSE file that
14 * accompanied this code).
15 *
16 * You should have received a copy of the GNU General Public License version
17 * 2 along with this work; if not, write to the Free Software Foundation,
18 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
19 *
20 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
21 * or visit www.oracle.com if you need additional information or have any
22 * questions.
23 *
24 */
25
26 #include "precompiled.hpp"
27 #include "asm/macroAssembler.hpp"
28 #include "asm/macroAssembler.inline.hpp"
29 #include "code/debugInfoRec.hpp"
30 #include "code/icBuffer.hpp"
31 #include "code/vtableStubs.hpp"
32 #include "interpreter/interpreter.hpp"
33 #include "interpreter/interp_masm.hpp"
34 #include "logging/log.hpp"
35 #include "memory/resourceArea.hpp"
36 #include "oops/compiledICHolder.hpp"
37 #include "runtime/safepointMechanism.hpp"
38 #include "runtime/sharedRuntime.hpp"
39 #include "runtime/vframeArray.hpp"
40 #include "utilities/align.hpp"
41 #include "vmreg_aarch64.inline.hpp"
42 #ifdef COMPILER1
43 #include "c1/c1_Runtime1.hpp"
44 #endif
45 #if COMPILER2_OR_JVMCI
46 #include "adfiles/ad_aarch64.hpp"
47 #include "opto/runtime.hpp"
48 #endif
272 case T_SHORT:
273 case T_INT:
274 if (int_args < Argument::n_int_register_parameters_j) {
275 regs[i].set1(INT_ArgReg[int_args++]->as_VMReg());
276 } else {
277 regs[i].set1(VMRegImpl::stack2reg(stk_args));
278 stk_args += 2;
279 }
280 break;
281 case T_VOID:
282 // halves of T_LONG or T_DOUBLE
283 assert(i != 0 && (sig_bt[i - 1] == T_LONG || sig_bt[i - 1] == T_DOUBLE), "expecting half");
284 regs[i].set_bad();
285 break;
286 case T_LONG:
287 assert((i + 1) < total_args_passed && sig_bt[i + 1] == T_VOID, "expecting half");
288 // fall through
289 case T_OBJECT:
290 case T_ARRAY:
291 case T_ADDRESS:
292 if (int_args < Argument::n_int_register_parameters_j) {
293 regs[i].set2(INT_ArgReg[int_args++]->as_VMReg());
294 } else {
295 regs[i].set2(VMRegImpl::stack2reg(stk_args));
296 stk_args += 2;
297 }
298 break;
299 case T_FLOAT:
300 if (fp_args < Argument::n_float_register_parameters_j) {
301 regs[i].set1(FP_ArgReg[fp_args++]->as_VMReg());
302 } else {
303 regs[i].set1(VMRegImpl::stack2reg(stk_args));
304 stk_args += 2;
305 }
306 break;
307 case T_DOUBLE:
308 assert((i + 1) < total_args_passed && sig_bt[i + 1] == T_VOID, "expecting half");
309 if (fp_args < Argument::n_float_register_parameters_j) {
310 regs[i].set2(FP_ArgReg[fp_args++]->as_VMReg());
311 } else {
312 regs[i].set2(VMRegImpl::stack2reg(stk_args));
313 stk_args += 2;
314 }
315 break;
316 default:
317 ShouldNotReachHere();
318 break;
319 }
320 }
321
322 return align_up(stk_args, 2);
323 }
324
// Patch the caller's callsite with entry to compiled code if it exists.
static void patch_callers_callsite(MacroAssembler *masm) {
  Label L;
  // Method::_code is non-null only once compiled code exists for the
  // method; if it is null there is nothing to patch, skip the VM call.
  __ ldr(rscratch1, Address(rmethod, in_bytes(Method::code_offset())));
  __ cbz(rscratch1, L);

  // Build a frame and save the full register state: the runtime call
  // below must not disturb the argument registers of the pending call.
  __ enter();
  __ push_CPU_state();

  // VM needs caller's callsite
  // VM needs target method
  // This needs to be a long call since we will relocate this adapter to
  // the codeBuffer and it may not reach

#ifndef PRODUCT
  assert(frame::arg_reg_save_area_bytes == 0, "not expecting frame reg save area");
#endif

  // Arguments: c_rarg0 = target Method*, c_rarg1 = caller's return pc.
  __ mov(c_rarg0, rmethod);
  __ mov(c_rarg1, lr);
  __ lea(rscratch1, RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::fixup_callers_callsite)));
  // blrt: call through rscratch1 with 2 integer args, 0 fp args
  // (NOTE(review): trailing argument presumably encodes the return
  // type for the simulator-aware call shim — confirm against blrt).
  __ blrt(rscratch1, 2, 0, 0);
  __ maybe_isb();

  __ pop_CPU_state();
  // restore sp
  __ leave();
  __ bind(L);
}
354
// Generate a C2I adapter: unpack the arguments from the compiled
// layout described by regs[] (registers + outgoing stack slots) into
// the interpreter's all-on-stack layout, then jump to the method's
// interpreter entry point.  skip_fixup is bound here so the caller's
// verified entry can bypass the callsite patching below.
static void gen_c2i_adapter(MacroAssembler *masm,
                            int total_args_passed,
                            int comp_args_on_stack,
                            const BasicType *sig_bt,
                            const VMRegPair *regs,
                            Label& skip_fixup) {
  // Before we get into the guts of the C2I adapter, see if we should be here
  // at all.  We've come from compiled code and are attempting to jump to the
  // interpreter, which means the caller made a static call to get here
  // (vcalls always get a compiled target if there is one). Check for a
  // compiled target. If there is one, we need to patch the caller's call.
  patch_callers_callsite(masm);

  __ bind(skip_fixup);

  int words_pushed = 0;

  // Since all args are passed on the stack, total_args_passed *
  // Interpreter::stackElementSize is the space we need.

  int extraspace = total_args_passed * Interpreter::stackElementSize;

  // Preserve the caller's SP in r13 (senderSP) for the interpreter.
  __ mov(r13, sp);

  // stack is aligned, keep it that way
  extraspace = align_up(extraspace, 2*wordSize);

  if (extraspace)
    __ sub(sp, sp, extraspace);

  // Now write the args into the outgoing interpreter space
  for (int i = 0; i < total_args_passed; i++) {
    if (sig_bt[i] == T_VOID) {
      // T_VOID entries are the dummy upper halves of longs/doubles.
      assert(i > 0 && (sig_bt[i-1] == T_LONG || sig_bt[i-1] == T_DOUBLE), "missing half");
      continue;
    }

    // offset to start parameters
    int st_off = (total_args_passed - i - 1) * Interpreter::stackElementSize;
    int next_off = st_off - Interpreter::stackElementSize;

    // Say 4 args:
    // i   st_off
    // 0   32 T_LONG
    // 1   24 T_VOID
    // 2   16 T_OBJECT
    // 3    8 T_BOOL
    // -    0 return address
    //
    // However to make things extra confusing: because we can fit a long/double
    // in a single slot on a 64-bit VM and it would be silly to break them up,
    // the interpreter leaves one slot empty and only stores to a single slot.
    // In this case the slot that is occupied is the T_VOID slot.

    VMReg r_1 = regs[i].first();
    VMReg r_2 = regs[i].second();
    if (!r_1->is_valid()) {
      assert(!r_2->is_valid(), "");
      continue;
    }
    if (r_1->is_stack()) {
      // memory to memory use rscratch1
      int ld_off = (r_1->reg2stack() * VMRegImpl::stack_slot_size
                    + extraspace
                    + words_pushed * wordSize);
      if (!r_2->is_valid()) {
        // sign extend??
        __ ldrw(rscratch1, Address(sp, ld_off));
        __ str(rscratch1, Address(sp, st_off));

      } else {

        __ ldr(rscratch1, Address(sp, ld_off));

        // Two VMRegs|OptoRegs can be T_OBJECT, T_ADDRESS, T_DOUBLE, T_LONG
        // T_DOUBLE and T_LONG use two slots in the interpreter
        if ( sig_bt[i] == T_LONG || sig_bt[i] == T_DOUBLE) {
          // ld_off == LSW, ld_off+wordSize == MSW
          // st_off == MSW, next_off == LSW
          __ str(rscratch1, Address(sp, next_off));
#ifdef ASSERT
          // Overwrite the unused slot with known junk
          __ mov(rscratch1, 0xdeadffffdeadaaaaul);
          __ str(rscratch1, Address(sp, st_off));
#endif /* ASSERT */
        } else {
          __ str(rscratch1, Address(sp, st_off));
        }
      }
    } else if (r_1->is_Register()) {
      Register r = r_1->as_Register();
      if (!r_2->is_valid()) {
        // must be only an int (or less ) so move only 32bits to slot
        // why not sign extend??
        __ str(r, Address(sp, st_off));
      } else {
        // Two VMRegs|OptoRegs can be T_OBJECT, T_ADDRESS, T_DOUBLE, T_LONG
        // T_DOUBLE and T_LONG use two slots in the interpreter
        if ( sig_bt[i] == T_LONG || sig_bt[i] == T_DOUBLE) {
          // long/double in gpr
#ifdef ASSERT
          // Overwrite the unused slot with known junk
          __ mov(rscratch1, 0xdeadffffdeadaaabul);
          __ str(rscratch1, Address(sp, st_off));
#endif /* ASSERT */
          __ str(r, Address(sp, next_off));
        } else {
          __ str(r, Address(sp, st_off));
        }
      }
    } else {
      assert(r_1->is_FloatRegister(), "");
      if (!r_2->is_valid()) {
        // only a float use just part of the slot
        __ strs(r_1->as_FloatRegister(), Address(sp, st_off));
      } else {
#ifdef ASSERT
        // Overwrite the unused slot with known junk
        __ mov(rscratch1, 0xdeadffffdeadaaacul);
        __ str(rscratch1, Address(sp, st_off));
#endif /* ASSERT */
        __ strd(r_1->as_FloatRegister(), Address(sp, next_off));
      }
    }
  }

  // esp must point at the first argument slot we just wrote.
  __ mov(esp, sp); // Interp expects args on caller's expression stack

  // Tail-jump to the method's interpreter entry point.
  __ ldr(rscratch1, Address(rmethod, in_bytes(Method::interpreter_entry_offset())));
  __ br(rscratch1);
}
486
487
488 void SharedRuntime::gen_i2c_adapter(MacroAssembler *masm,
489 int total_args_passed,
490 int comp_args_on_stack,
491 const BasicType *sig_bt,
492 const VMRegPair *regs) {
493
494 // Note: r13 contains the senderSP on entry. We must preserve it since
495 // we may do a i2c -> c2i transition if we lose a race where compiled
496 // code goes non-entrant while we get args ready.
497
498 // In addition we use r13 to locate all the interpreter args because
499 // we must align the stack to 16 bytes.
500
501 // Adapters are frameless.
502
503 // An i2c adapter is frameless because the *caller* frame, which is
504 // interpreted, routinely repairs its own esp (from
505 // interpreter_frame_last_sp), even if a callee has modified the
506 // stack pointer. It also recalculates and aligns sp.
507
508 // A c2i adapter is frameless because the *callee* frame, which is
509 // interpreted, routinely repairs its caller's sp (from sender_sp,
510 // which is set up via the senderSP register).
511
512 // In other words, if *either* the caller or callee is interpreted, we can
513 // get the stack pointer repaired after a call.
554 __ sub(rscratch1, sp, comp_words_on_stack * wordSize);
555 __ andr(sp, rscratch1, -16);
556 }
557
558 // Will jump to the compiled code just as if compiled code was doing it.
559 // Pre-load the register-jump target early, to schedule it better.
560 __ ldr(rscratch1, Address(rmethod, in_bytes(Method::from_compiled_offset())));
561
562 #if INCLUDE_JVMCI
563 if (EnableJVMCI || UseAOT) {
564 // check if this call should be routed towards a specific entry point
565 __ ldr(rscratch2, Address(rthread, in_bytes(JavaThread::jvmci_alternate_call_target_offset())));
566 Label no_alternative_target;
567 __ cbz(rscratch2, no_alternative_target);
568 __ mov(rscratch1, rscratch2);
569 __ str(zr, Address(rthread, in_bytes(JavaThread::jvmci_alternate_call_target_offset())));
570 __ bind(no_alternative_target);
571 }
572 #endif // INCLUDE_JVMCI
573
574 // Now generate the shuffle code.
575 for (int i = 0; i < total_args_passed; i++) {
576 if (sig_bt[i] == T_VOID) {
577 assert(i > 0 && (sig_bt[i-1] == T_LONG || sig_bt[i-1] == T_DOUBLE), "missing half");
578 continue;
579 }
580
581 // Pick up 0, 1 or 2 words from SP+offset.
582
583 assert(!regs[i].second()->is_valid() || regs[i].first()->next() == regs[i].second(),
584 "scrambled load targets?");
585 // Load in argument order going down.
586 int ld_off = (total_args_passed - i - 1)*Interpreter::stackElementSize;
587 // Point to interpreter value (vs. tag)
588 int next_off = ld_off - Interpreter::stackElementSize;
589 //
590 //
591 //
592 VMReg r_1 = regs[i].first();
593 VMReg r_2 = regs[i].second();
594 if (!r_1->is_valid()) {
595 assert(!r_2->is_valid(), "");
596 continue;
597 }
598 if (r_1->is_stack()) {
599 // Convert stack slot to an SP offset (+ wordSize to account for return address )
600 int st_off = regs[i].first()->reg2stack()*VMRegImpl::stack_slot_size;
601 if (!r_2->is_valid()) {
602 // sign extend???
603 __ ldrsw(rscratch2, Address(esp, ld_off));
604 __ str(rscratch2, Address(sp, st_off));
605 } else {
606 //
607 // We are using two optoregs. This can be either T_OBJECT,
608 // T_ADDRESS, T_LONG, or T_DOUBLE the interpreter allocates
609 // two slots but only uses one for thr T_LONG or T_DOUBLE case
610 // So we must adjust where to pick up the data to match the
611 // interpreter.
612 //
613 // Interpreter local[n] == MSW, local[n+1] == LSW however locals
614 // are accessed as negative so LSW is at LOW address
615
616 // ld_off is MSW so get LSW
617 const int offset = (sig_bt[i]==T_LONG||sig_bt[i]==T_DOUBLE)?
618 next_off : ld_off;
619 __ ldr(rscratch2, Address(esp, offset));
620 // st_off is LSW (i.e. reg.first())
621 __ str(rscratch2, Address(sp, st_off));
622 }
623 } else if (r_1->is_Register()) { // Register argument
624 Register r = r_1->as_Register();
625 if (r_2->is_valid()) {
626 //
627 // We are using two VMRegs. This can be either T_OBJECT,
628 // T_ADDRESS, T_LONG, or T_DOUBLE the interpreter allocates
629 // two slots but only uses one for thr T_LONG or T_DOUBLE case
630 // So we must adjust where to pick up the data to match the
631 // interpreter.
632
633 const int offset = (sig_bt[i]==T_LONG||sig_bt[i]==T_DOUBLE)?
634 next_off : ld_off;
635
636 // this can be a misaligned move
637 __ ldr(r, Address(esp, offset));
638 } else {
639 // sign extend and use a full word?
640 __ ldrw(r, Address(esp, ld_off));
641 }
642 } else {
643 if (!r_2->is_valid()) {
644 __ ldrs(r_1->as_FloatRegister(), Address(esp, ld_off));
645 } else {
646 __ ldrd(r_1->as_FloatRegister(), Address(esp, next_off));
647 }
648 }
649 }
650
651 // 6243940 We might end up in handle_wrong_method if
652 // the callee is deoptimized as we race thru here. If that
653 // happens we don't want to take a safepoint because the
654 // caller frame will look interpreted and arguments are now
713 result[idx++] = 'N';
714 break;
715 case T_METADATA:
716 result[idx++] = 'M';
717 break;
718 case T_NARROWKLASS:
719 result[idx++] = 'K';
720 break;
721 default:
722 result[idx++] = '?';
723 break;
724 }
725 }
726 result[idx++] = ')';
727 result[idx] = '\0';
728 }
729 #endif
730
731 // ---------------------------------------------------------------
// ---------------------------------------------------------------
// Generate both adapters for one (fingerprinted) signature: the i2c
// adapter at i2c_entry, then the c2i adapter with an inline-cache
// (unverified) entry at c2i_unverified_entry followed by the verified
// entry at c2i_entry.  Returns the AdapterHandlerEntry describing the
// three entry points.
AdapterHandlerEntry* SharedRuntime::generate_i2c2i_adapters(MacroAssembler *masm,
                                                            int total_args_passed,
                                                            int comp_args_on_stack,
                                                            const BasicType *sig_bt,
                                                            const VMRegPair *regs,
                                                            AdapterFingerPrint* fingerprint) {
  address i2c_entry = __ pc();
#ifdef BUILTIN_SIM
  // When running on the builtin AArch64 simulator, register a
  // human-readable name for the generated adapter with the simulator.
  char *name = NULL;
  AArch64Simulator *sim = NULL;
  size_t len = 65536;
  if (NotifySimulator) {
    name = NEW_C_HEAP_ARRAY(char, len, mtInternal);
  }

  if (name) {
    generate_i2c_adapter_name(name, total_args_passed, sig_bt);
    sim = AArch64Simulator::get_current(UseSimulatorCache, DisableBCCheck);
    sim->notifyCompile(name, i2c_entry);
  }
#endif
  gen_i2c_adapter(masm, total_args_passed, comp_args_on_stack, sig_bt, regs);

  address c2i_unverified_entry = __ pc();
  Label skip_fixup;

  Label ok;

  Register holder = rscratch2;    // CompiledICHolder*, set up by the call site
  Register receiver = j_rarg0;
  Register tmp = r10;  // A call-clobbered register not used for arg passing

  // -------------------------------------------------------------------------
  // Generate a C2I adapter.  On entry we know rmethod holds the Method* during calls
  // to the interpreter.  The args start out packed in the compiled layout.  They
  // need to be unpacked into the interpreter layout.  This will almost always
  // require some stack space.  We grow the current (compiled) stack, then repack
  // the args.  We finally end in a jump to the generic interpreter entry point.
  // On exit from the interpreter, the interpreter will restore our SP (lest the
  // compiled code, which relies solely on SP and not FP, get sick).

  {
    __ block_comment("c2i_unverified_entry {");
    // Inline-cache check: the receiver's klass must match the klass
    // cached in the CompiledICHolder, otherwise take the ic-miss stub.
    __ load_klass(rscratch1, receiver);
    __ ldr(tmp, Address(holder, CompiledICHolder::holder_klass_offset()));
    __ cmp(rscratch1, tmp);
    // Load the target Method* before the branch (harmless on a miss).
    __ ldr(rmethod, Address(holder, CompiledICHolder::holder_metadata_offset()));
    __ br(Assembler::EQ, ok);
    __ far_jump(RuntimeAddress(SharedRuntime::get_ic_miss_stub()));

    __ bind(ok);
    // Method might have been compiled since the call site was patched to
    // interpreted; if that is the case treat it as a miss so we can get
    // the call site corrected.
    __ ldr(rscratch1, Address(rmethod, in_bytes(Method::code_offset())));
    __ cbz(rscratch1, skip_fixup);
    __ far_jump(RuntimeAddress(SharedRuntime::get_ic_miss_stub()));
    __ block_comment("} c2i_unverified_entry");
  }

  address c2i_entry = __ pc();

#ifdef BUILTIN_SIM
  // Rewrite the simulator name "i2c..." -> "c2i...", register the c2i
  // entry point, and release the temporary name buffer.
  if (name) {
    name[0] = 'c';
    name[2] = 'i';
    sim->notifyCompile(name, c2i_entry);
    FREE_C_HEAP_ARRAY(char, name, mtInternal);
  }
#endif

  gen_c2i_adapter(masm, total_args_passed, comp_args_on_stack, sig_bt, regs, skip_fixup);

  __ flush();
  return AdapterHandlerLibrary::new_entry(fingerprint, i2c_entry, c2i_entry, c2i_unverified_entry);
}
808
809 int SharedRuntime::c_calling_convention(const BasicType *sig_bt,
810 VMRegPair *regs,
811 VMRegPair *regs2,
812 int total_args_passed) {
813 assert(regs2 == NULL, "not needed on AArch64");
814
815 // We return the amount of VMRegImpl stack slots we need to reserve for all
816 // the arguments NOT counting out_preserve_stack_slots.
817
818 static const Register INT_ArgReg[Argument::n_int_register_parameters_c] = {
819 c_rarg0, c_rarg1, c_rarg2, c_rarg3, c_rarg4, c_rarg5, c_rarg6, c_rarg7
820 };
821 static const FloatRegister FP_ArgReg[Argument::n_float_register_parameters_c] = {
822 c_farg0, c_farg1, c_farg2, c_farg3,
823 c_farg4, c_farg5, c_farg6, c_farg7
824 };
825
826 uint int_args = 0;
3177 __ str(zr, Address(rthread, JavaThread::exception_handler_pc_offset()));
3178 __ str(zr, Address(rthread, JavaThread::exception_pc_offset()));
3179 #endif
3180 // Clear the exception oop so GC no longer processes it as a root.
3181 __ str(zr, Address(rthread, JavaThread::exception_oop_offset()));
3182
3183 // r0: exception oop
3184 // r8: exception handler
3185 // r4: exception pc
3186 // Jump to handler
3187
3188 __ br(r8);
3189
3190 // Make sure all code is generated
3191 masm->flush();
3192
3193 // Set exception blob
3194 _exception_blob = ExceptionBlob::create(&buffer, oop_maps, SimpleRuntimeFrame::framesize >> 1);
3195 }
3196 #endif // COMPILER2_OR_JVMCI
|
9 *
10 * This code is distributed in the hope that it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
13 * version 2 for more details (a copy is included in the LICENSE file that
14 * accompanied this code).
15 *
16 * You should have received a copy of the GNU General Public License version
17 * 2 along with this work; if not, write to the Free Software Foundation,
18 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
19 *
20 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
21 * or visit www.oracle.com if you need additional information or have any
22 * questions.
23 *
24 */
25
26 #include "precompiled.hpp"
27 #include "asm/macroAssembler.hpp"
28 #include "asm/macroAssembler.inline.hpp"
29 #include "classfile/symbolTable.hpp"
30 #include "code/debugInfoRec.hpp"
31 #include "code/icBuffer.hpp"
32 #include "code/vtableStubs.hpp"
33 #include "interpreter/interpreter.hpp"
34 #include "interpreter/interp_masm.hpp"
35 #include "logging/log.hpp"
36 #include "memory/resourceArea.hpp"
37 #include "oops/compiledICHolder.hpp"
38 #include "runtime/safepointMechanism.hpp"
39 #include "runtime/sharedRuntime.hpp"
40 #include "runtime/vframeArray.hpp"
41 #include "utilities/align.hpp"
42 #include "vmreg_aarch64.inline.hpp"
43 #ifdef COMPILER1
44 #include "c1/c1_Runtime1.hpp"
45 #endif
46 #if COMPILER2_OR_JVMCI
47 #include "adfiles/ad_aarch64.hpp"
48 #include "opto/runtime.hpp"
49 #endif
273 case T_SHORT:
274 case T_INT:
275 if (int_args < Argument::n_int_register_parameters_j) {
276 regs[i].set1(INT_ArgReg[int_args++]->as_VMReg());
277 } else {
278 regs[i].set1(VMRegImpl::stack2reg(stk_args));
279 stk_args += 2;
280 }
281 break;
282 case T_VOID:
283 // halves of T_LONG or T_DOUBLE
284 assert(i != 0 && (sig_bt[i - 1] == T_LONG || sig_bt[i - 1] == T_DOUBLE), "expecting half");
285 regs[i].set_bad();
286 break;
287 case T_LONG:
288 assert((i + 1) < total_args_passed && sig_bt[i + 1] == T_VOID, "expecting half");
289 // fall through
290 case T_OBJECT:
291 case T_ARRAY:
292 case T_ADDRESS:
293 case T_VALUETYPE:
294 if (int_args < Argument::n_int_register_parameters_j) {
295 regs[i].set2(INT_ArgReg[int_args++]->as_VMReg());
296 } else {
297 regs[i].set2(VMRegImpl::stack2reg(stk_args));
298 stk_args += 2;
299 }
300 break;
301 case T_FLOAT:
302 if (fp_args < Argument::n_float_register_parameters_j) {
303 regs[i].set1(FP_ArgReg[fp_args++]->as_VMReg());
304 } else {
305 regs[i].set1(VMRegImpl::stack2reg(stk_args));
306 stk_args += 2;
307 }
308 break;
309 case T_DOUBLE:
310 assert((i + 1) < total_args_passed && sig_bt[i + 1] == T_VOID, "expecting half");
311 if (fp_args < Argument::n_float_register_parameters_j) {
312 regs[i].set2(FP_ArgReg[fp_args++]->as_VMReg());
313 } else {
314 regs[i].set2(VMRegImpl::stack2reg(stk_args));
315 stk_args += 2;
316 }
317 break;
318 default:
319 ShouldNotReachHere();
320 break;
321 }
322 }
323
324 return align_up(stk_args, 2);
325 }
326
// Upper bounds on the number of integer / float registers usable by
// java_return_convention below (they size its register tables).
// NOTE(review): max_int is n_int_register_parameters_j + 1, yet only
// eight j_rarg registers are listed in INT_ArgReg (the extra array
// element is value-initialized) — confirm the "+1" is intentional.
const uint SharedRuntime::java_return_convention_max_int = Argument::n_int_register_parameters_j+1;
const uint SharedRuntime::java_return_convention_max_float = Argument::n_float_register_parameters_j;
329
330 int SharedRuntime::java_return_convention(const BasicType *sig_bt,
331 VMRegPair *regs,
332 int total_args_passed) {
333
334 // Create the mapping between argument positions and
335 // registers.
336 static const Register INT_ArgReg[java_return_convention_max_int] = {
337 j_rarg0, j_rarg1, j_rarg2, j_rarg3, j_rarg4, j_rarg5, j_rarg6, j_rarg7
338 };
339 static const FloatRegister FP_ArgReg[java_return_convention_max_float] = {
340 j_farg0, j_farg1, j_farg2, j_farg3,
341 j_farg4, j_farg5, j_farg6, j_farg7
342 };
343
344
345 uint int_args = 0;
346 uint fp_args = 0;
347
348 for (int i = 0; i < total_args_passed; i++) {
349 switch (sig_bt[i]) {
350 case T_BOOLEAN:
351 case T_CHAR:
352 case T_BYTE:
353 case T_SHORT:
354 case T_INT:
355 if (int_args < Argument::n_int_register_parameters_j) {
356 regs[i].set1(INT_ArgReg[int_args++]->as_VMReg());
357 int_args ++;
358 } else {
359 // Should we have gurantee here?
360 return -1;
361 }
362 break;
363 case T_VOID:
364 // halves of T_LONG or T_DOUBLE
365 assert(i != 0 && (sig_bt[i - 1] == T_LONG || sig_bt[i - 1] == T_DOUBLE), "expecting half");
366 regs[i].set_bad();
367 break;
368 case T_LONG:
369 assert((i + 1) < total_args_passed && sig_bt[i + 1] == T_VOID, "expecting half");
370 // fall through
371 case T_OBJECT:
372 case T_ARRAY:
373 case T_ADDRESS:
374 // Should T_METADATA be added to java_calling_convention as well ?
375 case T_METADATA:
376 case T_VALUETYPE:
377 if (int_args < Argument::n_int_register_parameters_j) {
378 regs[i].set2(INT_ArgReg[int_args++]->as_VMReg());
379 int_args ++;
380 } else {
381 return -1;
382 }
383 break;
384 case T_FLOAT:
385 if (fp_args < Argument::n_float_register_parameters_j) {
386 regs[i].set1(FP_ArgReg[fp_args++]->as_VMReg());
387 fp_args ++;
388 } else {
389 return -1;
390 }
391 break;
392 case T_DOUBLE:
393 assert((i + 1) < total_args_passed && sig_bt[i + 1] == T_VOID, "expecting half");
394 if (fp_args < Argument::n_float_register_parameters_j) {
395 regs[i].set2(FP_ArgReg[fp_args++]->as_VMReg());
396 fp_args ++;
397 } else {
398 return -1;
399 }
400 break;
401 default:
402 ShouldNotReachHere();
403 break;
404 }
405 }
406
407 return int_args + fp_args;
408 }
409
// Patch the caller's callsite with entry to compiled code if it exists.
static void patch_callers_callsite(MacroAssembler *masm) {
  Label L;
  // Method::_code is non-null only once compiled code exists for the
  // method; if it is null there is nothing to patch, skip the VM call.
  __ ldr(rscratch1, Address(rmethod, in_bytes(Method::code_offset())));
  __ cbz(rscratch1, L);

  // Build a frame and save the full register state: the runtime call
  // below must not disturb the argument registers of the pending call.
  __ enter();
  __ push_CPU_state();

  // VM needs caller's callsite
  // VM needs target method
  // This needs to be a long call since we will relocate this adapter to
  // the codeBuffer and it may not reach

#ifndef PRODUCT
  assert(frame::arg_reg_save_area_bytes == 0, "not expecting frame reg save area");
#endif

  // Arguments: c_rarg0 = target Method*, c_rarg1 = caller's return pc.
  __ mov(c_rarg0, rmethod);
  __ mov(c_rarg1, lr);
  __ lea(rscratch1, RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::fixup_callers_callsite)));
  // blrt: call through rscratch1 with 2 integer args, 0 fp args
  // (NOTE(review): trailing argument presumably encodes the return
  // type for the simulator-aware call shim — confirm against blrt).
  __ blrt(rscratch1, 2, 0, 0);
  __ maybe_isb();

  __ pop_CPU_state();
  // restore sp
  __ leave();
  __ bind(L);
}
439
440 // For each value type argument, sig includes the list of fields of
441 // the value type. This utility function computes the number of
442 // arguments for the call if value types are passed by reference (the
443 // calling convention the interpreter expects).
444 static int compute_total_args_passed_int(const GrowableArray<SigEntry>* sig_extended) {
445 guarantee(ValueTypePassFieldsAsArgs == false, "Support for ValValueTypePassFieldsAsArgs = true is not implemented");
446
447 int total_args_passed = 0;
448 total_args_passed = sig_extended->length();
449 return total_args_passed;
450 }
451
// Generate a C2I adapter (value-type aware signature variant): unpack
// the arguments from the compiled layout described by regs[] into the
// interpreter's all-on-stack layout, then jump to the method's
// interpreter entry point.  sig_extended supplies the BasicType per
// entry; skip_fixup lets the verified entry bypass callsite patching.
static void gen_c2i_adapter(MacroAssembler *masm,
                            const GrowableArray<SigEntry>* sig_extended,
                            const VMRegPair *regs,
                            Label& skip_fixup) {

  // Before we get into the guts of the C2I adapter, see if we should be here
  // at all.  We've come from compiled code and are attempting to jump to the
  // interpreter, which means the caller made a static call to get here
  // (vcalls always get a compiled target if there is one). Check for a
  // compiled target. If there is one, we need to patch the caller's call.
  patch_callers_callsite(masm);

  __ bind(skip_fixup);

  int words_pushed = 0;

  // Since all args are passed on the stack, total_args_passed *
  // Interpreter::stackElementSize is the space we need.

  int total_args_passed = compute_total_args_passed_int(sig_extended);
  int extraspace = total_args_passed * Interpreter::stackElementSize;

  // Preserve the caller's SP in r13 (senderSP) for the interpreter.
  __ mov(r13, sp);

  // stack is aligned, keep it that way
  extraspace = align_up(extraspace, 2 * wordSize);
  if (extraspace)
    __ sub(sp, sp, extraspace);

  // Now write the args into the outgoing interpreter space
  for (int i = 0; i < total_args_passed; i++) {
    BasicType bt = sig_extended->at(i)._bt;
    if (bt == T_VOID) {
      // T_VOID entries are the dummy upper halves of longs/doubles.
      //DMS TODO assert(i > 0 && (sig_bt[i-1] == T_LONG || sig_bt[i-1] == T_DOUBLE), "missing half");
      continue;
    }

    // offset to start parameters
    int st_off = (total_args_passed - i - 1) * Interpreter::stackElementSize;
    int next_off = st_off - Interpreter::stackElementSize;

    // Say 4 args:
    // i   st_off
    // 0   32 T_LONG
    // 1   24 T_VOID
    // 2   16 T_OBJECT
    // 3    8 T_BOOL
    // -    0 return address
    //
    // However to make things extra confusing: because we can fit a long/double
    // in a single slot on a 64-bit VM and it would be silly to break them up,
    // the interpreter leaves one slot empty and only stores to a single slot.
    // In this case the slot that is occupied is the T_VOID slot.

    VMReg r_1 = regs[i].first();
    VMReg r_2 = regs[i].second();
    if (!r_1->is_valid()) {
      assert(!r_2->is_valid(), "");
      continue;
    }
    if (r_1->is_stack()) {
      // memory to memory use rscratch1
      int ld_off = (r_1->reg2stack() * VMRegImpl::stack_slot_size + extraspace + words_pushed * wordSize);
      if (!r_2->is_valid()) {
        // sign extend??
        __ ldrw(rscratch1, Address(sp, ld_off));
        __ str(rscratch1, Address(sp, st_off));

      } else {

        __ ldr(rscratch1, Address(sp, ld_off));

        // Two VMRegs|OptoRegs can be T_OBJECT, T_ADDRESS, T_DOUBLE, T_LONG
        // T_DOUBLE and T_LONG use two slots in the interpreter
        if ( bt == T_LONG || bt == T_DOUBLE) {
          // ld_off == LSW, ld_off+wordSize == MSW
          // st_off == MSW, next_off == LSW
          __ str(rscratch1, Address(sp, next_off));
#ifdef ASSERT
          // Overwrite the unused slot with known junk
          __ mov(rscratch1, 0xdeadffffdeadaaaaul);
          __ str(rscratch1, Address(sp, st_off));
#endif /* ASSERT */
        } else {
          __ str(rscratch1, Address(sp, st_off));
        }
      }
    } else if (r_1->is_Register()) {
      Register r = r_1->as_Register();
      if (!r_2->is_valid()) {
        // must be only an int (or less ) so move only 32bits to slot
        // why not sign extend??
        __ str(r, Address(sp, st_off));
      } else {
        // Two VMRegs|OptoRegs can be T_OBJECT, T_ADDRESS, T_DOUBLE, T_LONG
        // T_DOUBLE and T_LONG use two slots in the interpreter
        if ( bt == T_LONG || bt == T_DOUBLE) {
          // long/double in gpr
#ifdef ASSERT
          // Overwrite the unused slot with known junk
          __ mov(rscratch1, 0xdeadffffdeadaaabul);
          __ str(rscratch1, Address(sp, st_off));
#endif /* ASSERT */
          __ str(r, Address(sp, next_off));
        } else {
          __ str(r, Address(sp, st_off));
        }
      }
    } else {
      assert(r_1->is_FloatRegister(), "");
      if (!r_2->is_valid()) {
        // only a float use just part of the slot
        __ strs(r_1->as_FloatRegister(), Address(sp, st_off));
      } else {
#ifdef ASSERT
        // Overwrite the unused slot with known junk
        __ mov(rscratch1, 0xdeadffffdeadaaacul);
        __ str(rscratch1, Address(sp, st_off));
#endif /* ASSERT */
        __ strd(r_1->as_FloatRegister(), Address(sp, next_off));
      }
    }
  }

  // esp must point at the first argument slot we just wrote.
  __ mov(esp, sp); // Interp expects args on caller's expression stack

  // Tail-jump to the method's interpreter entry point.
  __ ldr(rscratch1, Address(rmethod, in_bytes(Method::interpreter_entry_offset())));
  __ br(rscratch1);
}
581
582
583 void SharedRuntime::gen_i2c_adapter(MacroAssembler *masm,
584 int comp_args_on_stack,
585 const GrowableArray<SigEntry>* sig,
586 const VMRegPair *regs) {
587
588
589 // Note: r13 contains the senderSP on entry. We must preserve it since
590 // we may do a i2c -> c2i transition if we lose a race where compiled
591 // code goes non-entrant while we get args ready.
592
593 // In addition we use r13 to locate all the interpreter args because
594 // we must align the stack to 16 bytes.
595
596 // Adapters are frameless.
597
598 // An i2c adapter is frameless because the *caller* frame, which is
599 // interpreted, routinely repairs its own esp (from
600 // interpreter_frame_last_sp), even if a callee has modified the
601 // stack pointer. It also recalculates and aligns sp.
602
603 // A c2i adapter is frameless because the *callee* frame, which is
604 // interpreted, routinely repairs its caller's sp (from sender_sp,
605 // which is set up via the senderSP register).
606
607 // In other words, if *either* the caller or callee is interpreted, we can
608 // get the stack pointer repaired after a call.
649 __ sub(rscratch1, sp, comp_words_on_stack * wordSize);
650 __ andr(sp, rscratch1, -16);
651 }
652
653 // Will jump to the compiled code just as if compiled code was doing it.
654 // Pre-load the register-jump target early, to schedule it better.
655 __ ldr(rscratch1, Address(rmethod, in_bytes(Method::from_compiled_offset())));
656
657 #if INCLUDE_JVMCI
658 if (EnableJVMCI || UseAOT) {
659 // check if this call should be routed towards a specific entry point
660 __ ldr(rscratch2, Address(rthread, in_bytes(JavaThread::jvmci_alternate_call_target_offset())));
661 Label no_alternative_target;
662 __ cbz(rscratch2, no_alternative_target);
663 __ mov(rscratch1, rscratch2);
664 __ str(zr, Address(rthread, in_bytes(JavaThread::jvmci_alternate_call_target_offset())));
665 __ bind(no_alternative_target);
666 }
667 #endif // INCLUDE_JVMCI
668
669 int total_args_passed = compute_total_args_passed_int(sig);
670
671 // Now generate the shuffle code.
672 for (int i = 0; i < total_args_passed; i++) {
673 BasicType bt = sig->at(i)._bt;
674 if (bt == T_VOID) {
675 //DMS TODO: assert(i > 0 && (sig_bt[i-1] == T_LONG || sig_bt[i-1] == T_DOUBLE), "missing half");
676 continue;
677 }
678
679 // Pick up 0, 1 or 2 words from SP+offset.
680
681 assert(!regs[i].second()->is_valid() || regs[i].first()->next() == regs[i].second(),
682 "scrambled load targets?");
683 // Load in argument order going down.
684 int ld_off = (total_args_passed - i - 1) * Interpreter::stackElementSize;
685 // Point to interpreter value (vs. tag)
686 int next_off = ld_off - Interpreter::stackElementSize;
687 //
688 //
689 //
690 VMReg r_1 = regs[i].first();
691 VMReg r_2 = regs[i].second();
692 if (!r_1->is_valid()) {
693 assert(!r_2->is_valid(), "");
694 continue;
695 }
696 if (r_1->is_stack()) {
697 // Convert stack slot to an SP offset (+ wordSize to account for return address )
698 int st_off = regs[i].first()->reg2stack()*VMRegImpl::stack_slot_size;
699 if (!r_2->is_valid()) {
700 // sign extend???
701 __ ldrsw(rscratch2, Address(esp, ld_off));
702 __ str(rscratch2, Address(sp, st_off));
703 } else {
704 //
705 // We are using two optoregs. This can be either T_OBJECT,
706 // T_ADDRESS, T_LONG, or T_DOUBLE the interpreter allocates
        // two slots but only uses one for the T_LONG or T_DOUBLE case
708 // So we must adjust where to pick up the data to match the
709 // interpreter.
710 //
711 // Interpreter local[n] == MSW, local[n+1] == LSW however locals
712 // are accessed as negative so LSW is at LOW address
713
714 // ld_off is MSW so get LSW
715 const int offset = (bt == T_LONG || bt == T_DOUBLE) ? next_off : ld_off;
716 __ ldr(rscratch2, Address(esp, offset));
717 // st_off is LSW (i.e. reg.first())
718 __ str(rscratch2, Address(sp, st_off));
719 }
720 } else if (r_1->is_Register()) { // Register argument
721 Register r = r_1->as_Register();
722 if (r_2->is_valid()) {
723 //
724 // We are using two VMRegs. This can be either T_OBJECT,
725 // T_ADDRESS, T_LONG, or T_DOUBLE the interpreter allocates
        // two slots but only uses one for the T_LONG or T_DOUBLE case
727 // So we must adjust where to pick up the data to match the
728 // interpreter.
729
730 const int offset = (bt == T_LONG || bt == T_DOUBLE) ? next_off : ld_off;
731
732 // this can be a misaligned move
733 __ ldr(r, Address(esp, offset));
734 } else {
735 // sign extend and use a full word?
736 __ ldrw(r, Address(esp, ld_off));
737 }
738 } else {
739 if (!r_2->is_valid()) {
740 __ ldrs(r_1->as_FloatRegister(), Address(esp, ld_off));
741 } else {
742 __ ldrd(r_1->as_FloatRegister(), Address(esp, next_off));
743 }
744 }
745 }
746
747 // 6243940 We might end up in handle_wrong_method if
748 // the callee is deoptimized as we race thru here. If that
749 // happens we don't want to take a safepoint because the
750 // caller frame will look interpreted and arguments are now
809 result[idx++] = 'N';
810 break;
811 case T_METADATA:
812 result[idx++] = 'M';
813 break;
814 case T_NARROWKLASS:
815 result[idx++] = 'K';
816 break;
817 default:
818 result[idx++] = '?';
819 break;
820 }
821 }
822 result[idx++] = ')';
823 result[idx] = '\0';
824 }
825 #endif
826
// ---------------------------------------------------------------
// Generate the interpreter-to-compiled (i2c) and compiled-to-interpreter
// (c2i) adapters for one adapter fingerprint into *masm, wrap the emitted
// code in an AdapterBlob (returned via the new_adapter out-parameter) and
// return an AdapterHandlerEntry recording the entry points.
//
//   comp_args_on_stack    - compiled-layout stack arg slot count
//   comp_args_on_stack_cc - same, for the scalarized calling convention
//   sig/regs, sig_cc/regs_cc - signatures and register assignments for the
//                           two conventions (equal when no value types occur)
//   fingerprint           - identity key for the adapter cache
AdapterHandlerEntry* SharedRuntime::generate_i2c2i_adapters(MacroAssembler *masm,
                                                            int comp_args_on_stack,
                                                            int comp_args_on_stack_cc,
                                                            const GrowableArray<SigEntry>* sig,
                                                            const VMRegPair* regs,
                                                            const GrowableArray<SigEntry>* sig_cc,
                                                            const VMRegPair* regs_cc,
                                                            AdapterFingerPrint* fingerprint,
                                                            AdapterBlob*& new_adapter) {
  address i2c_entry = __ pc();
#ifdef BUILTIN_SIM
  char *name = NULL;
  AArch64Simulator *sim = NULL;
  size_t len = 65536;
  if (NotifySimulator) {
    name = NEW_C_HEAP_ARRAY(char, len, mtInternal);
  }

  if (name) {
    // NOTE(review): total_args_passed and sig_bt are not declared in this
    // scope -- this BUILTIN_SIM path looks stale and would fail to compile
    // if the macro were defined; confirm before enabling a simulator build.
    generate_i2c_adapter_name(name, total_args_passed, sig_bt);
    sim = AArch64Simulator::get_current(UseSimulatorCache, DisableBCCheck);
    sim->notifyCompile(name, i2c_entry);
  }
#endif
  // The i2c shuffle is emitted for the scalarized (calling-convention)
  // signature, not the nominal Java signature.
  gen_i2c_adapter(masm, comp_args_on_stack_cc, sig_cc, regs_cc);

  address c2i_unverified_entry = __ pc();
  Label skip_fixup;

  Label ok;

  Register holder = rscratch2;   // CompiledICHolder* from the inline-cache site
  Register receiver = j_rarg0;   // first Java argument (the receiver oop)
  Register tmp = r10;  // A call-clobbered register not used for arg passing

  // -------------------------------------------------------------------------
  // Generate a C2I adapter.  On entry we know rmethod holds the Method* during calls
  // to the interpreter.  The args start out packed in the compiled layout.  They
  // need to be unpacked into the interpreter layout.  This will almost always
  // require some stack space.  We grow the current (compiled) stack, then repack
  // the args.  We  finally end in a jump to the generic interpreter entry point.
  // On exit from the interpreter, the interpreter will restore our SP (lest the
  // compiled code, which relies solely on SP and not FP, get sick).

  {
    __ block_comment("c2i_unverified_entry {");
    // Inline-cache check: the receiver's klass must match the klass cached
    // in the ICHolder; on mismatch fall through to the ic-miss stub.
    __ load_klass(rscratch1, receiver);
    __ ldr(tmp, Address(holder, CompiledICHolder::holder_klass_offset()));
    __ cmp(rscratch1, tmp);
    __ ldr(rmethod, Address(holder, CompiledICHolder::holder_metadata_offset()));
    __ br(Assembler::EQ, ok);
    __ far_jump(RuntimeAddress(SharedRuntime::get_ic_miss_stub()));

    __ bind(ok);
    // Method might have been compiled since the call site was patched to
    // interpreted; if that is the case treat it as a miss so we can get
    // the call site corrected.
    __ ldr(rscratch1, Address(rmethod, in_bytes(Method::code_offset())));
    __ cbz(rscratch1, skip_fixup);
    __ far_jump(RuntimeAddress(SharedRuntime::get_ic_miss_stub()));
    __ block_comment("} c2i_unverified_entry");
  }

  address c2i_entry = __ pc();
  // The value-type entry shares the regular c2i entry point here.
  address c2i_value_entry = c2i_entry;

#ifdef BUILTIN_SIM
  if (name) {
    // Rewrite "i2c" to "c2i" in the cached name and notify the simulator
    // of the second entry point, then release the buffer.
    name[0] = 'c';
    name[2] = 'i';
    sim->notifyCompile(name, c2i_entry);
    FREE_C_HEAP_ARRAY(char, name, mtInternal);
  }
#endif

  gen_c2i_adapter(masm, sig_cc, regs_cc, skip_fixup);

  __ flush();

  OopMapSet* oop_maps = NULL;

  int frame_complete = CodeOffsets::frame_never_safe;
  int frame_size_in_words = 0;

  // The c2i adapter might safepoint and trigger a GC. The caller must make sure that
  // the GC knows about the location of oop argument locations passed to the c2i adapter.
  bool caller_must_gc_arguments = (regs != regs_cc);
  new_adapter = AdapterBlob::create(masm->code(), frame_complete, frame_size_in_words, oop_maps, caller_must_gc_arguments);
  return AdapterHandlerLibrary::new_entry(fingerprint, i2c_entry, c2i_entry, c2i_value_entry, c2i_unverified_entry);
}
918
919 int SharedRuntime::c_calling_convention(const BasicType *sig_bt,
920 VMRegPair *regs,
921 VMRegPair *regs2,
922 int total_args_passed) {
923 assert(regs2 == NULL, "not needed on AArch64");
924
925 // We return the amount of VMRegImpl stack slots we need to reserve for all
926 // the arguments NOT counting out_preserve_stack_slots.
927
928 static const Register INT_ArgReg[Argument::n_int_register_parameters_c] = {
929 c_rarg0, c_rarg1, c_rarg2, c_rarg3, c_rarg4, c_rarg5, c_rarg6, c_rarg7
930 };
931 static const FloatRegister FP_ArgReg[Argument::n_float_register_parameters_c] = {
932 c_farg0, c_farg1, c_farg2, c_farg3,
933 c_farg4, c_farg5, c_farg6, c_farg7
934 };
935
936 uint int_args = 0;
3287 __ str(zr, Address(rthread, JavaThread::exception_handler_pc_offset()));
3288 __ str(zr, Address(rthread, JavaThread::exception_pc_offset()));
3289 #endif
3290 // Clear the exception oop so GC no longer processes it as a root.
3291 __ str(zr, Address(rthread, JavaThread::exception_oop_offset()));
3292
3293 // r0: exception oop
3294 // r8: exception handler
3295 // r4: exception pc
3296 // Jump to handler
3297
3298 __ br(r8);
3299
3300 // Make sure all code is generated
3301 masm->flush();
3302
3303 // Set exception blob
3304 _exception_blob = ExceptionBlob::create(&buffer, oop_maps, SimpleRuntimeFrame::framesize >> 1);
3305 }
3306 #endif // COMPILER2_OR_JVMCI
3307
3308 BufferedValueTypeBlob* SharedRuntime::generate_buffered_value_type_adapter(const ValueKlass* vk) {
3309 BufferBlob* buf = BufferBlob::create("value types pack/unpack", 16 * K);
3310 CodeBuffer buffer(buf);
3311 short buffer_locs[20];
3312 buffer.insts()->initialize_shared_locs((relocInfo*)buffer_locs,
3313 sizeof(buffer_locs)/sizeof(relocInfo));
3314
3315 MacroAssembler _masm(&buffer);
3316 MacroAssembler* masm = &_masm;
3317
3318 const Array<SigEntry>* sig_vk = vk->extended_sig();
3319 const Array<VMRegPair>* regs = vk->return_regs();
3320
3321 int pack_fields_off = __ offset();
3322
3323 int j = 1;
3324 for (int i = 0; i < sig_vk->length(); i++) {
3325 BasicType bt = sig_vk->at(i)._bt;
3326 if (bt == T_VALUETYPE) {
3327 continue;
3328 }
3329 if (bt == T_VOID) {
3330 if (sig_vk->at(i-1)._bt == T_LONG ||
3331 sig_vk->at(i-1)._bt == T_DOUBLE) {
3332 j++;
3333 }
3334 continue;
3335 }
3336 int off = sig_vk->at(i)._offset;
3337 VMRegPair pair = regs->at(j);
3338 VMReg r_1 = pair.first();
3339 VMReg r_2 = pair.second();
3340 Address to(r0, off);
3341 if (bt == T_FLOAT) {
3342 __ strs(r_1->as_FloatRegister(), to);
3343 } else if (bt == T_DOUBLE) {
3344 __ strd(r_1->as_FloatRegister(), to);
3345 } else if (bt == T_OBJECT || bt == T_ARRAY) {
3346 __ lea(r_1->as_Register(), to);
3347 } else {
3348 assert(is_java_primitive(bt), "unexpected basic type");
3349 size_t size_in_bytes = type2aelembytes(bt);
3350 __ store_sized_value(to, r_1->as_Register(), size_in_bytes);
3351 }
3352 j++;
3353 }
3354 assert(j == regs->length(), "missed a field?");
3355
3356 __ ret(r0);
3357
3358 int unpack_fields_off = __ offset();
3359
3360 j = 1;
3361 for (int i = 0; i < sig_vk->length(); i++) {
3362 BasicType bt = sig_vk->at(i)._bt;
3363 if (bt == T_VALUETYPE) {
3364 continue;
3365 }
3366 if (bt == T_VOID) {
3367 if (sig_vk->at(i-1)._bt == T_LONG ||
3368 sig_vk->at(i-1)._bt == T_DOUBLE) {
3369 j++;
3370 }
3371 continue;
3372 }
3373 int off = sig_vk->at(i)._offset;
3374 VMRegPair pair = regs->at(j);
3375 VMReg r_1 = pair.first();
3376 VMReg r_2 = pair.second();
3377 Address from(r0, off);
3378 if (bt == T_FLOAT) {
3379 __ ldrs(r_1->as_FloatRegister(), from);
3380 } else if (bt == T_DOUBLE) {
3381 __ ldrd(r_1->as_FloatRegister(), from);
3382 } else if (bt == T_OBJECT || bt == T_ARRAY) {
3383 __ lea(r_1->as_Register(), from);
3384 } else {
3385 assert(is_java_primitive(bt), "unexpected basic type");
3386 size_t size_in_bytes = type2aelembytes(bt);
3387 __ load_sized_value(r_1->as_Register(), from, size_in_bytes, bt != T_CHAR && bt != T_BOOLEAN);
3388 }
3389 j++;
3390 }
3391 assert(j == regs->length(), "missed a field?");
3392
3393 // DMS CHECK:
3394 if (StressValueTypeReturnedAsFields) {
3395 __ load_klass(r0, r0);
3396 __ orr(r0, r0, 1);
3397 }
3398
3399 __ ret(r0);
3400
3401 __ flush();
3402
3403 return BufferedValueTypeBlob::create(&buffer, pack_fields_off, unpack_fields_off);
3404 }
|