src/hotspot/cpu/aarch64/sharedRuntime_aarch64.cpp

   9  *
  10  * This code is distributed in the hope that it will be useful, but WITHOUT
  11  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  12  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  13  * version 2 for more details (a copy is included in the LICENSE file that
  14  * accompanied this code).
  15  *
  16  * You should have received a copy of the GNU General Public License version
  17  * 2 along with this work; if not, write to the Free Software Foundation,
  18  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  19  *
  20  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  21  * or visit www.oracle.com if you need additional information or have any
  22  * questions.
  23  *
  24  */
  25 
  26 #include "precompiled.hpp"
  27 #include "asm/macroAssembler.hpp"
  28 #include "asm/macroAssembler.inline.hpp"

  29 #include "code/debugInfoRec.hpp"
  30 #include "code/icBuffer.hpp"
  31 #include "code/vtableStubs.hpp"
  32 #include "interpreter/interpreter.hpp"
  33 #include "interpreter/interp_masm.hpp"
  34 #include "logging/log.hpp"
  35 #include "memory/resourceArea.hpp"
  36 #include "oops/compiledICHolder.hpp"
  37 #include "runtime/safepointMechanism.hpp"
  38 #include "runtime/sharedRuntime.hpp"
  39 #include "runtime/vframeArray.hpp"
  40 #include "utilities/align.hpp"
  41 #include "vmreg_aarch64.inline.hpp"
  42 #ifdef COMPILER1
  43 #include "c1/c1_Runtime1.hpp"
  44 #endif
  45 #if COMPILER2_OR_JVMCI
  46 #include "adfiles/ad_aarch64.hpp"
  47 #include "opto/runtime.hpp"
  48 #endif


 271     case T_SHORT:
 272     case T_INT:
 273       if (int_args < Argument::n_int_register_parameters_j) {
 274         regs[i].set1(INT_ArgReg[int_args++]->as_VMReg());
 275       } else {
 276         regs[i].set1(VMRegImpl::stack2reg(stk_args));
 277         stk_args += 2;
 278       }
 279       break;
 280     case T_VOID:
 281       // halves of T_LONG or T_DOUBLE
 282       assert(i != 0 && (sig_bt[i - 1] == T_LONG || sig_bt[i - 1] == T_DOUBLE), "expecting half");
 283       regs[i].set_bad();
 284       break;
 285     case T_LONG:
 286       assert((i + 1) < total_args_passed && sig_bt[i + 1] == T_VOID, "expecting half");
 287       // fall through
 288     case T_OBJECT:
 289     case T_ARRAY:
 290     case T_ADDRESS:

 291       if (int_args < Argument::n_int_register_parameters_j) {
 292         regs[i].set2(INT_ArgReg[int_args++]->as_VMReg());
 293       } else {
 294         regs[i].set2(VMRegImpl::stack2reg(stk_args));
 295         stk_args += 2;
 296       }
 297       break;
 298     case T_FLOAT:
 299       if (fp_args < Argument::n_float_register_parameters_j) {
 300         regs[i].set1(FP_ArgReg[fp_args++]->as_VMReg());
 301       } else {
 302         regs[i].set1(VMRegImpl::stack2reg(stk_args));
 303         stk_args += 2;
 304       }
 305       break;
 306     case T_DOUBLE:
 307       assert((i + 1) < total_args_passed && sig_bt[i + 1] == T_VOID, "expecting half");
 308       if (fp_args < Argument::n_float_register_parameters_j) {
 309         regs[i].set2(FP_ArgReg[fp_args++]->as_VMReg());
 310       } else {
 311         regs[i].set2(VMRegImpl::stack2reg(stk_args));
 312         stk_args += 2;
 313       }
 314       break;
 315     default:
 316       ShouldNotReachHere();
 317       break;
 318     }
 319   }
 320 
 321   return align_up(stk_args, 2);
 322 }
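// For reference, a minimal standalone sketch (not HotSpot code, not part of
// this change) of the stack-slot accounting implemented above, assuming the
// AArch64 Java convention of 8 integer and 8 float argument registers; the
// function name and the one-letter signature encoding are hypothetical:
//
//   static int model_java_stack_slots(const char* sig, int n) {
//     int int_args = 0, fp_args = 0, stk_args = 0;
//     for (int i = 0; i < n; i++) {
//       switch (sig[i]) {
//       case 'Z': case 'C': case 'B': case 'S': case 'I': // int-like
//       case 'J': case 'L':                               // long, oop
//         if (int_args < 8) { int_args++; } else { stk_args += 2; }
//         break;
//       case 'F': case 'D':                               // float, double
//         if (fp_args < 8) { fp_args++; } else { stk_args += 2; }
//         break;
//       default: break;                                   // T_VOID halves
//       }
//     }
//     return (stk_args + 1) & ~1;  // align_up(stk_args, 2)
//   }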
 323 
 324 // Patch the caller's call site with entry to compiled code if it exists.
 325 static void patch_callers_callsite(MacroAssembler *masm) {
 326   Label L;
 327   __ ldr(rscratch1, Address(rmethod, in_bytes(Method::code_offset())));
 328   __ cbz(rscratch1, L);
 329 
 330   __ enter();
 331   __ push_CPU_state();
 332 
 333   // VM needs caller's callsite
 334   // VM needs target method
 335   // This needs to be a long call since we will relocate this adapter to
 336   // the codeBuffer and it may not reach
 337 
 338 #ifndef PRODUCT
 339   assert(frame::arg_reg_save_area_bytes == 0, "not expecting frame reg save area");
 340 #endif
 341 
 342   __ mov(c_rarg0, rmethod);
 343   __ mov(c_rarg1, lr);
 344   __ lea(rscratch1, RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::fixup_callers_callsite)));
 345   __ blrt(rscratch1, 2, 0, 0);
 346   __ maybe_isb();
 347 
 348   __ pop_CPU_state();
 349   // restore sp
 350   __ leave();
 351   __ bind(L);
 352 }
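// Note: an AArch64 "bl" encodes only a +/-128 MB PC-relative offset, which is
// why the call above materializes the target in rscratch1 and branches
// through the register: the adapter may be relocated anywhere in the code
// cache, beyond direct-branch range.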
 353 
 354 static void gen_c2i_adapter(MacroAssembler *masm,
 355                             int total_args_passed,
 356                             int comp_args_on_stack,
 357                             const BasicType *sig_bt,
 358                             const VMRegPair *regs,
 359                             Label& skip_fixup) {
 360   // Before we get into the guts of the C2I adapter, see if we should be here
 361   // at all.  We've come from compiled code and are attempting to jump to the
 362   // interpreter, which means the caller made a static call to get here
 363   // (vcalls always get a compiled target if there is one).  Check for a
 364   // compiled target.  If there is one, we need to patch the caller's call.
 365   patch_callers_callsite(masm);
 366 
 367   __ bind(skip_fixup);
 368 
 369   int words_pushed = 0;
 370 
 371   // Since all args are passed on the stack, total_args_passed *
 372   // Interpreter::stackElementSize is the space we need.
 373 
 374   int extraspace = total_args_passed * Interpreter::stackElementSize;
 375 
 376   __ mov(r13, sp);
 377 
 378   // stack is aligned, keep it that way
 379   extraspace = align_up(extraspace, 2*wordSize);
 380 
 381   if (extraspace)
 382     __ sub(sp, sp, extraspace);
 383 
 384   // Now write the args into the outgoing interpreter space
 385   for (int i = 0; i < total_args_passed; i++) {
 386     if (sig_bt[i] == T_VOID) {
 387       assert(i > 0 && (sig_bt[i-1] == T_LONG || sig_bt[i-1] == T_DOUBLE), "missing half");
 388       continue;
 389     }
 390 
 391     // offset to start parameters
 392     int st_off   = (total_args_passed - i - 1) * Interpreter::stackElementSize;
 393     int next_off = st_off - Interpreter::stackElementSize;
 394 
 395     // Say 4 args:
 396     // i   st_off
 397     // 0   32 T_LONG
 398     // 1   24 T_VOID
 399     // 2   16 T_OBJECT
 400     // 3    8 T_BOOL
 401     // -    0 return address
 402     //
 403     // However, to make things extra confusing: because we can fit a long/double in
 404     // a single slot on a 64-bit VM and it would be silly to break them up, the
 405     // interpreter leaves one slot empty and only stores to a single slot. In this
 406     // case the slot that is occupied is the T_VOID slot. See, I said it was confusing.
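    // Concretely, with the 4-arg example above, the T_LONG value is written
    // once at next_off == 24 (the T_VOID slot); st_off == 32 is left unused
    // (and junk-filled under ASSERT below).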
 407 
 408     VMReg r_1 = regs[i].first();
 409     VMReg r_2 = regs[i].second();



 410     if (!r_1->is_valid()) {
 411       assert(!r_2->is_valid(), "");
 412       continue;
 413     }

 414     if (r_1->is_stack()) {
 415       // memory to memory use rscratch1
 416       int ld_off = (r_1->reg2stack() * VMRegImpl::stack_slot_size
 417                     + extraspace
 418                     + words_pushed * wordSize);
 419       if (!r_2->is_valid()) {
 420         // sign extend??
 421         __ ldrw(rscratch1, Address(sp, ld_off));
 422         __ str(rscratch1, Address(sp, st_off));
 423 
 424       } else {
 425 
 426         __ ldr(rscratch1, Address(sp, ld_off));
 427 
 428         // Two VMRegs|OptoRegs can be T_OBJECT, T_ADDRESS, T_DOUBLE, T_LONG
 429         // T_DOUBLE and T_LONG use two slots in the interpreter
 430         if (sig_bt[i] == T_LONG || sig_bt[i] == T_DOUBLE) {
 431           // ld_off == LSW, ld_off+wordSize == MSW
 432           // st_off == MSW, next_off == LSW
 433           __ str(rscratch1, Address(sp, next_off));
 434 #ifdef ASSERT
 435           // Overwrite the unused slot with known junk
 436           __ mov(rscratch1, 0xdeadffffdeadaaaaul);
 437           __ str(rscratch1, Address(sp, st_off));
 438 #endif /* ASSERT */
 439         } else {
 440           __ str(rscratch1, Address(sp, st_off));
 441         }
 442       }
 443     } else if (r_1->is_Register()) {
 444       Register r = r_1->as_Register();
 445       if (!r_2->is_valid()) {
 446         // must be only an int (or smaller), so move only 32 bits to the slot
 447         // why not sign extend??
 448         __ str(r, Address(sp, st_off));
 449       } else {
 450         // Two VMRegs|OptoRegs can be T_OBJECT, T_ADDRESS, T_DOUBLE, T_LONG
 451         // T_DOUBLE and T_LONG use two slots in the interpreter
 452         if (sig_bt[i] == T_LONG || sig_bt[i] == T_DOUBLE) {
 453           // long/double in gpr
 454 #ifdef ASSERT
 455           // Overwrite the unused slot with known junk
 456           __ mov(rscratch1, 0xdeadffffdeadaaabul);
 457           __ str(rscratch1, Address(sp, st_off));
 458 #endif /* ASSERT */
 459           __ str(r, Address(sp, next_off));
 460         } else {
 461           __ str(r, Address(sp, st_off));
 462         }
 463       }
 464     } else {
 465       assert(r_1->is_FloatRegister(), "");
 466       if (!r_2->is_valid()) {
 467         // only a float; use just part of the slot
 468         __ strs(r_1->as_FloatRegister(), Address(sp, st_off));
 469       } else {
 470 #ifdef ASSERT
 471         // Overwrite the unused slot with known junk
 472         __ mov(rscratch1, 0xdeadffffdeadaaacul);
 473         __ str(rscratch1, Address(sp, st_off));
 474 #endif /* ASSERT */
 475         __ strd(r_1->as_FloatRegister(), Address(sp, next_off));
 476       }
 477     }


 478   }
 479 
 480   __ mov(esp, sp); // Interp expects args on caller's expression stack
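  // (r13, loaded from sp before the frame was extended, still holds the
  //  caller's original stack pointer, which the interpreter uses as the
  //  sender sp.)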
 481 
 482   __ ldr(rscratch1, Address(rmethod, in_bytes(Method::interpreter_entry_offset())));
 483   __ br(rscratch1);
 484 }
 485 

 486 
 487 void SharedRuntime::gen_i2c_adapter(MacroAssembler *masm,
 488                                     int total_args_passed,
 489                                     int comp_args_on_stack,
 490                                     const BasicType *sig_bt,
 491                                     const VMRegPair *regs) {
 492 
 493   // Note: r13 contains the senderSP on entry. We must preserve it since
 494   // we may do a i2c -> c2i transition if we lose a race where compiled
 495   // code goes non-entrant while we get args ready.
 496 
 497   // In addition we use r13 to locate all the interpreter args because
 498   // we must align the stack to 16 bytes.
 499 
 500   // Adapters are frameless.
 501 
 502   // An i2c adapter is frameless because the *caller* frame, which is
 503   // interpreted, routinely repairs its own esp (from
 504   // interpreter_frame_last_sp), even if a callee has modified the
 505   // stack pointer.  It also recalculates and aligns sp.
 506 
 507   // A c2i adapter is frameless because the *callee* frame, which is
 508   // interpreted, routinely repairs its caller's sp (from sender_sp,
 509   // which is set up via the senderSP register).
 510 
 511   // In other words, if *either* the caller or callee is interpreted, we can


 531       range_check(masm, rax, r11,
 532                   Interpreter::code()->code_start(), Interpreter::code()->code_end(),
 533                   L_ok);
 534     if (StubRoutines::code1() != NULL)
 535       range_check(masm, rax, r11,
 536                   StubRoutines::code1()->code_begin(), StubRoutines::code1()->code_end(),
 537                   L_ok);
 538     if (StubRoutines::code2() != NULL)
 539       range_check(masm, rax, r11,
 540                   StubRoutines::code2()->code_begin(), StubRoutines::code2()->code_end(),
 541                   L_ok);
 542     const char* msg = "i2c adapter must return to an interpreter frame";
 543     __ block_comment(msg);
 544     __ stop(msg);
 545     __ bind(L_ok);
 546     __ block_comment("} verify_i2ce ");
 547 #endif
 548   }
 549 
 550   // Cut-out for having no stack args.
 551   int comp_words_on_stack = align_up(comp_args_on_stack*VMRegImpl::stack_slot_size, wordSize)>>LogBytesPerWord;
 552   if (comp_args_on_stack) {

 553     __ sub(rscratch1, sp, comp_words_on_stack * wordSize);
 554     __ andr(sp, rscratch1, -16);
 555   }
 556 
 557   // Will jump to the compiled code just as if compiled code was doing it.
 558   // Pre-load the register-jump target early, to schedule it better.
 559   __ ldr(rscratch1, Address(rmethod, in_bytes(Method::from_compiled_offset())));
 560 
 561 #if INCLUDE_JVMCI
 562   if (EnableJVMCI || UseAOT) {
 563     // check if this call should be routed towards a specific entry point
 564     __ ldr(rscratch2, Address(rthread, in_bytes(JavaThread::jvmci_alternate_call_target_offset())));
 565     Label no_alternative_target;
 566     __ cbz(rscratch2, no_alternative_target);
 567     __ mov(rscratch1, rscratch2);
 568     __ str(zr, Address(rthread, in_bytes(JavaThread::jvmci_alternate_call_target_offset())));
 569     __ bind(no_alternative_target);
 570   }
 571 #endif // INCLUDE_JVMCI
 572 


 573   // Now generate the shuffle code.
 574   for (int i = 0; i < total_args_passed; i++) {
 575     if (sig_bt[i] == T_VOID) {
 576       assert(i > 0 && (sig_bt[i-1] == T_LONG || sig_bt[i-1] == T_DOUBLE), "missing half");



 577       continue;
 578     }
 579 
 580     // Pick up 0, 1 or 2 words from SP+offset.

 581 
 582     assert(!regs[i].second()->is_valid() || regs[i].first()->next() == regs[i].second(),
 583             "scrambled load targets?");
 584     // Load in argument order going down.
 585     int ld_off = (total_args_passed - i - 1)*Interpreter::stackElementSize;
 586     // Point to interpreter value (vs. tag)
 587     int next_off = ld_off - Interpreter::stackElementSize;
 588     //
 589     //
 590     //
 591     VMReg r_1 = regs[i].first();
 592     VMReg r_2 = regs[i].second();
 593     if (!r_1->is_valid()) {
 594       assert(!r_2->is_valid(), "");
 595       continue;
 596     }
 597     if (r_1->is_stack()) {
 598       // Convert stack slot to an SP offset (+ wordSize to account for return address )
 599       int st_off = regs[i].first()->reg2stack()*VMRegImpl::stack_slot_size;
 600       if (!r_2->is_valid()) {
 601         // sign extend???
 602         __ ldrsw(rscratch2, Address(esp, ld_off));
 603         __ str(rscratch2, Address(sp, st_off));
 604       } else {
 605         //
 606         // We are using two optoregs. This can be either T_OBJECT,
 607         // T_ADDRESS, T_LONG, or T_DOUBLE; the interpreter allocates
 608         // two slots but only uses one for the T_LONG or T_DOUBLE case,
 609         // so we must adjust where to pick up the data to match the
 610         // interpreter.
 611         //
 612         // Interpreter local[n] == MSW, local[n+1] == LSW however locals
 613         // are accessed as negative so LSW is at LOW address
 614 
 615         // ld_off is MSW so get LSW
 616         const int offset = (sig_bt[i] == T_LONG || sig_bt[i] == T_DOUBLE) ?
 617                            next_off : ld_off;
 618         __ ldr(rscratch2, Address(esp, offset));
 619         // st_off is LSW (i.e. reg.first())
 620         __ str(rscratch2, Address(sp, st_off));
 621       }
 622     } else if (r_1->is_Register()) {  // Register argument
 623       Register r = r_1->as_Register();
 624       if (r_2->is_valid()) {
 625         //
 626         // We are using two VMRegs. This can be either T_OBJECT,
 627         // T_ADDRESS, T_LONG, or T_DOUBLE; the interpreter allocates
 628         // two slots but only uses one for the T_LONG or T_DOUBLE case,
 629         // so we must adjust where to pick up the data to match the
 630         // interpreter.
 631 
 632         const int offset = (sig_bt[i] == T_LONG || sig_bt[i] == T_DOUBLE) ?
 633                            next_off : ld_off;
 634 
 635         // this can be a misaligned move
 636         __ ldr(r, Address(esp, offset));
 637       } else {
 638         // sign extend and use a full word?
 639         __ ldrw(r, Address(esp, ld_off));
 640       }
 641     } else {
 642       if (!r_2->is_valid()) {
 643         __ ldrs(r_1->as_FloatRegister(), Address(esp, ld_off));
 644       } else {
 645         __ ldrd(r_1->as_FloatRegister(), Address(esp, next_off));
 646       }
 647     }
 648   }
 649 

 650   // 6243940 We might end up in handle_wrong_method if
 651   // the callee is deoptimized as we race thru here. If that
 652   // happens we don't want to take a safepoint because the
 653   // caller frame will look interpreted and arguments are now
 654   // "compiled" so it is much better to make this transition
 655   // invisible to the stack walking code. Unfortunately if
 656   // we try and find the callee by normal means a safepoint
 657   // is possible. So we stash the desired callee in the thread
 658   // and the vm will find there should this case occur.
 659 
 660   __ str(rmethod, Address(rthread, JavaThread::callee_target_offset()));
 661 
 662   __ br(rscratch1);
 663 }
 664 
 665 #ifdef BUILTIN_SIM
 666 static void generate_i2c_adapter_name(char *result, int total_args_passed, const BasicType *sig_bt)
 667 {
 668   strcpy(result, "i2c(");
 669   int idx = 4;
 670   for (int i = 0; i < total_args_passed; i++) {
 671     switch(sig_bt[i]) {
 672     case T_BOOLEAN:
 673       result[idx++] = 'Z';
 674       break;
 675     case T_CHAR:
 676       result[idx++] = 'C';
 677       break;
 678     case T_FLOAT:
 679       result[idx++] = 'F';
 680       break;
 681     case T_DOUBLE:


 710       break;
 711     case T_NARROWOOP:
 712       result[idx++] = 'N';
 713       break;
 714     case T_METADATA:
 715       result[idx++] = 'M';
 716       break;
 717     case T_NARROWKLASS:
 718       result[idx++] = 'K';
 719       break;
 720     default:
 721       result[idx++] = '?';
 722       break;
 723     }
 724   }
 725   result[idx++] = ')';
 726   result[idx] = '\0';
 727 }
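// (Illustrative: a (boolean, float) signature yields the simulator tag
// "i2c(ZF)"; types without a letter map to '?'.)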
 728 #endif
 729 
 730 // ---------------------------------------------------------------
 731 AdapterHandlerEntry* SharedRuntime::generate_i2c2i_adapters(MacroAssembler *masm,
 732                                                             int total_args_passed,
 733                                                             int comp_args_on_stack,
 734                                                             const BasicType *sig_bt,
 735                                                             const VMRegPair *regs,
 736                                                             AdapterFingerPrint* fingerprint) {
 737   address i2c_entry = __ pc();
 738 #ifdef BUILTIN_SIM
 739   char *name = NULL;
 740   AArch64Simulator *sim = NULL;
 741   size_t len = 65536;
 742   if (NotifySimulator) {
 743     name = NEW_C_HEAP_ARRAY(char, len, mtInternal);
 744   }
 745 
 746   if (name) {
 747     generate_i2c_adapter_name(name, total_args_passed, sig_bt);
 748     sim = AArch64Simulator::get_current(UseSimulatorCache, DisableBCCheck);
 749     sim->notifyCompile(name, i2c_entry);
 750   }
 751 #endif
 752   gen_i2c_adapter(masm, total_args_passed, comp_args_on_stack, sig_bt, regs);
 753 
 754   address c2i_unverified_entry = __ pc();
 755   Label skip_fixup;
 756 
 757   Label ok;
 758 
 759   Register holder = rscratch2;
 760   Register receiver = j_rarg0;
 761   Register tmp = r10;  // A call-clobbered register not used for arg passing
 762 
 763   // -------------------------------------------------------------------------
 764   // Generate a C2I adapter.  On entry we know rmethod holds the Method* during calls
 765   // to the interpreter.  The args start out packed in the compiled layout.  They
 766   // need to be unpacked into the interpreter layout.  This will almost always
 767   // require some stack space.  We grow the current (compiled) stack, then repack
 768   // the args.  We finally end in a jump to the generic interpreter entry point.
 769   // On exit from the interpreter, the interpreter will restore our SP (lest the
 770   // compiled code, which relies solely on SP and not FP, get sick).
 771 
 772   {
 773     __ block_comment("c2i_unverified_entry {");
 774     __ load_klass(rscratch1, receiver);
 775     __ ldr(tmp, Address(holder, CompiledICHolder::holder_klass_offset()));
 776     __ cmp(rscratch1, tmp);
 777     __ ldr(rmethod, Address(holder, CompiledICHolder::holder_metadata_offset()));
 778     __ br(Assembler::EQ, ok);
 779     __ far_jump(RuntimeAddress(SharedRuntime::get_ic_miss_stub()));
 780 
 781     __ bind(ok);
 782     // Method might have been compiled since the call site was patched to
 783     // interpreted; if that is the case treat it as a miss so we can get
 784     // the call site corrected.
 785     __ ldr(rscratch1, Address(rmethod, in_bytes(Method::code_offset())));
 786     __ cbz(rscratch1, skip_fixup);
 787     __ far_jump(RuntimeAddress(SharedRuntime::get_ic_miss_stub()));
 788     __ block_comment("} c2i_unverified_entry");
 789   }
 790 
 791   address c2i_entry = __ pc();
 792 
 793 #ifdef BUILTIN_SIM
 794   if (name) {
 795     name[0] = 'c';
 796     name[2] = 'i';
 797     sim->notifyCompile(name, c2i_entry);
 798     FREE_C_HEAP_ARRAY(char, name, mtInternal);





























 799   }
 800 #endif
 801 
 802   gen_c2i_adapter(masm, total_args_passed, comp_args_on_stack, sig_bt, regs, skip_fixup);
 803 
 804   __ flush();
 805   return AdapterHandlerLibrary::new_entry(fingerprint, i2c_entry, c2i_entry, c2i_unverified_entry);
 806 }
 807 
 808 int SharedRuntime::c_calling_convention(const BasicType *sig_bt,
 809                                          VMRegPair *regs,
 810                                          VMRegPair *regs2,
 811                                          int total_args_passed) {
 812   assert(regs2 == NULL, "not needed on AArch64");
 813 
 814 // We return the amount of VMRegImpl stack slots we need to reserve for all
 815 // the arguments NOT counting out_preserve_stack_slots.
 816 
 817     static const Register INT_ArgReg[Argument::n_int_register_parameters_c] = {
 818       c_rarg0, c_rarg1, c_rarg2, c_rarg3, c_rarg4, c_rarg5,  c_rarg6,  c_rarg7
 819     };
 820     static const FloatRegister FP_ArgReg[Argument::n_float_register_parameters_c] = {
 821       c_farg0, c_farg1, c_farg2, c_farg3,
 822       c_farg4, c_farg5, c_farg6, c_farg7
 823     };
 824 
 825     uint int_args = 0;


 828 
 829     for (int i = 0; i < total_args_passed; i++) {
 830       switch (sig_bt[i]) {
 831       case T_BOOLEAN:
 832       case T_CHAR:
 833       case T_BYTE:
 834       case T_SHORT:
 835       case T_INT:
 836         if (int_args < Argument::n_int_register_parameters_c) {
 837           regs[i].set1(INT_ArgReg[int_args++]->as_VMReg());
 838         } else {
 839           regs[i].set1(VMRegImpl::stack2reg(stk_args));
 840           stk_args += 2;
 841         }
 842         break;
 843       case T_LONG:
 844         assert((i + 1) < total_args_passed && sig_bt[i + 1] == T_VOID, "expecting half");
 845         // fall through
 846       case T_OBJECT:
 847       case T_ARRAY:

 848       case T_ADDRESS:
 849       case T_METADATA:
 850         if (int_args < Argument::n_int_register_parameters_c) {
 851           regs[i].set2(INT_ArgReg[int_args++]->as_VMReg());
 852         } else {
 853           regs[i].set2(VMRegImpl::stack2reg(stk_args));
 854           stk_args += 2;
 855         }
 856         break;
 857       case T_FLOAT:
 858         if (fp_args < Argument::n_float_register_parameters_c) {
 859           regs[i].set1(FP_ArgReg[fp_args++]->as_VMReg());
 860         } else {
 861           regs[i].set1(VMRegImpl::stack2reg(stk_args));
 862           stk_args += 2;
 863         }
 864         break;
 865       case T_DOUBLE:
 866         assert((i + 1) < total_args_passed && sig_bt[i + 1] == T_VOID, "expecting half");
 867         if (fp_args < Argument::n_float_register_parameters_c) {


1704       reg_destroyed[out_regs[c_arg].first()->as_Register()->encoding()] = true;
1705     } else if (out_regs[c_arg].first()->is_FloatRegister()) {
1706       freg_destroyed[out_regs[c_arg].first()->as_FloatRegister()->encoding()] = true;
1707     }
1708 #endif /* ASSERT */
1709     switch (in_sig_bt[i]) {
1710       case T_ARRAY:
1711         if (is_critical_native) {
1712           unpack_array_argument(masm, in_regs[i], in_elem_bt[i], out_regs[c_arg + 1], out_regs[c_arg]);
1713           c_arg++;
1714 #ifdef ASSERT
1715           if (out_regs[c_arg].first()->is_Register()) {
1716             reg_destroyed[out_regs[c_arg].first()->as_Register()->encoding()] = true;
1717           } else if (out_regs[c_arg].first()->is_FloatRegister()) {
1718             freg_destroyed[out_regs[c_arg].first()->as_FloatRegister()->encoding()] = true;
1719           }
1720 #endif
1721           int_args++;
1722           break;
1723         }

1724       case T_OBJECT:
1725         assert(!is_critical_native, "no oop arguments");
1726         object_move(masm, map, oop_handle_offset, stack_slots, in_regs[i], out_regs[c_arg],
1727                     ((i == 0) && (!is_static)),
1728                     &receiver_offset);
1729         int_args++;
1730         break;
1731       case T_VOID:
1732         break;
1733 
1734       case T_FLOAT:
1735         float_move(masm, in_regs[i], out_regs[c_arg]);
1736         float_args++;
1737         break;
1738 
1739       case T_DOUBLE:
1740         assert( i + 1 < total_in_args &&
1741                 in_sig_bt[i + 1] == T_VOID &&
1742                 out_sig_bt[c_arg+1] == T_VOID, "bad arg list");
1743         double_move(masm, in_regs[i], out_regs[c_arg]);


1885   }
1886 
1887   // Now set thread in native
1888   __ mov(rscratch1, _thread_in_native);
1889   __ lea(rscratch2, Address(rthread, JavaThread::thread_state_offset()));
1890   __ stlrw(rscratch1, rscratch2);
1891 
1892   {
1893     int return_type = 0;
1894     switch (ret_type) {
1895     case T_VOID: return_type = 0; break;
1897     case T_CHAR:
1898     case T_BYTE:
1899     case T_SHORT:
1900     case T_INT:
1901     case T_BOOLEAN:
1902     case T_LONG:
1903       return_type = 1; break;
1904     case T_ARRAY:

1905     case T_OBJECT:
1906       return_type = 1; break;
1907     case T_FLOAT:
1908       return_type = 2; break;
1909     case T_DOUBLE:
1910       return_type = 3; break;
1911     default:
1912       ShouldNotReachHere();
1913     }
1914     rt_call(masm, native_func,
1915             int_args + 2, // AArch64 passes up to 8 args in int registers
1916             float_args,   // and up to 8 float args
1917             return_type);
1918   }
1919 
1920   __ bind(native_return);
1921 
1922   intptr_t return_pc = (intptr_t) __ pc();
1923   oop_maps->add_gc_map(return_pc - start, map);
1924 
1925   // Unpack native results.
1926   switch (ret_type) {
1927   case T_BOOLEAN: __ c2bool(r0);                     break;
1928   case T_CHAR   : __ ubfx(r0, r0, 0, 16);            break;
1929   case T_BYTE   : __ sbfx(r0, r0, 0, 8);             break;
1930   case T_SHORT  : __ sbfx(r0, r0, 0, 16);            break;
1931   case T_INT    : __ sbfx(r0, r0, 0, 32);            break;
1932   case T_DOUBLE :
1933   case T_FLOAT  :
1934     // Result is in v0; we'll save it as needed
1935     break;
1936   case T_ARRAY:                 // Really a handle

1937   case T_OBJECT:                // Really a handle
1938       break; // can't de-handlize until after safepoint check
1939   case T_VOID: break;
1940   case T_LONG: break;
1941   default       : ShouldNotReachHere();
1942   }
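  // (ubfx/sbfx above zero-/sign-extend the low 8/16/32 bits of r0 so a
  //  sub-word native result becomes a properly extended Java int.)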
1943 
1944   // Switch thread to "native transition" state before reading the synchronization state.
1945   // This additional state is necessary because reading and testing the synchronization
1946   // state is not atomic w.r.t. GC, as this scenario demonstrates:
1947   //     Java thread A, in _thread_in_native state, loads _not_synchronized and is preempted.
1948   //     VM thread changes sync state to synchronizing and suspends threads for GC.
1949   //     Thread A is resumed to finish this native method, but doesn't block here since it
1950   //     didn't see any synchronization in progress, and escapes.
1951   __ mov(rscratch1, _thread_in_native_trans);
1952 
1953   __ strw(rscratch1, Address(rthread, JavaThread::thread_state_offset()));
1954 
1955   // Force this write out before the read below
1956   __ dmb(Assembler::ISH);
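  // A minimal sketch of this publish-then-check protocol in portable C++
  // (illustrative only, not HotSpot code):
  //
  //   std::atomic<int>  thread_state{_thread_in_native};
  //   std::atomic<bool> safepoint_in_progress{false};
  //
  //   thread_state.store(_thread_in_native_trans, std::memory_order_relaxed);
  //   std::atomic_thread_fence(std::memory_order_seq_cst);  // the dmb above
  //   if (safepoint_in_progress.load(std::memory_order_relaxed)) {
  //     // must block until the safepoint operation completes
  //   }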


2021     __ bind(unlock_done);
2022     if (ret_type != T_FLOAT && ret_type != T_DOUBLE && ret_type != T_VOID) {
2023       restore_native_result(masm, ret_type, stack_slots);
2024     }
2025 
2026     __ bind(done);
2027   }
2028 
2029   Label dtrace_method_exit, dtrace_method_exit_done;
2030   {
2031     unsigned long offset;
2032     __ adrp(rscratch1, ExternalAddress((address)&DTraceMethodProbes), offset);
2033     __ ldrb(rscratch1, Address(rscratch1, offset));
2034     __ cbnzw(rscratch1, dtrace_method_exit);
2035     __ bind(dtrace_method_exit_done);
2036   }
2037 
2038   __ reset_last_Java_frame(false);
2039 
2040   // Unbox oop result, e.g. JNIHandles::resolve result.
2041   if (ret_type == T_OBJECT || ret_type == T_ARRAY) {
2042     __ resolve_jobject(r0, rthread, rscratch2);
2043   }
2044 
2045   if (CheckJNICalls) {
2046     // clear_pending_jni_exception_check
2047     __ str(zr, Address(rthread, JavaThread::pending_jni_exception_check_fn_offset()));
2048   }
2049 
2050   if (!is_critical_native) {
2051     // reset handle block
2052     __ ldr(r2, Address(rthread, JavaThread::active_handles_offset()));
2053     __ str(zr, Address(r2, JNIHandleBlock::top_offset_in_bytes()));
2054   }
2055 
2056   __ leave();
2057 
2058   if (!is_critical_native) {
2059     // Any exception pending?
2060     __ ldr(rscratch1, Address(rthread, in_bytes(Thread::pending_exception_offset())));
2061     __ cbnz(rscratch1, exception_pending);


3177   __ str(zr, Address(rthread, JavaThread::exception_handler_pc_offset()));
3178   __ str(zr, Address(rthread, JavaThread::exception_pc_offset()));
3179 #endif
3180   // Clear the exception oop so GC no longer processes it as a root.
3181   __ str(zr, Address(rthread, JavaThread::exception_oop_offset()));
3182 
3183   // r0: exception oop
3184   // r8:  exception handler
3185   // r4: exception pc
3186   // Jump to handler
3187 
3188   __ br(r8);
3189 
3190   // Make sure all code is generated
3191   masm->flush();
3192 
3193   // Set exception blob
3194   _exception_blob =  ExceptionBlob::create(&buffer, oop_maps, SimpleRuntimeFrame::framesize >> 1);
3195 }
3196 #endif // COMPILER2_OR_JVMCI

   9  *
  10  * This code is distributed in the hope that it will be useful, but WITHOUT
  11  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  12  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  13  * version 2 for more details (a copy is included in the LICENSE file that
  14  * accompanied this code).
  15  *
  16  * You should have received a copy of the GNU General Public License version
  17  * 2 along with this work; if not, write to the Free Software Foundation,
  18  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  19  *
  20  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  21  * or visit www.oracle.com if you need additional information or have any
  22  * questions.
  23  *
  24  */
  25 
  26 #include "precompiled.hpp"
  27 #include "asm/macroAssembler.hpp"
  28 #include "asm/macroAssembler.inline.hpp"
  29 #include "classfile/symbolTable.hpp"
  30 #include "code/debugInfoRec.hpp"
  31 #include "code/icBuffer.hpp"
  32 #include "code/vtableStubs.hpp"
  33 #include "interpreter/interpreter.hpp"
  34 #include "interpreter/interp_masm.hpp"
  35 #include "logging/log.hpp"
  36 #include "memory/resourceArea.hpp"
  37 #include "oops/compiledICHolder.hpp"
  38 #include "runtime/safepointMechanism.hpp"
  39 #include "runtime/sharedRuntime.hpp"
  40 #include "runtime/vframeArray.hpp"
  41 #include "utilities/align.hpp"
  42 #include "vmreg_aarch64.inline.hpp"
  43 #ifdef COMPILER1
  44 #include "c1/c1_Runtime1.hpp"
  45 #endif
  46 #if COMPILER2_OR_JVMCI
  47 #include "adfiles/ad_aarch64.hpp"
  48 #include "opto/runtime.hpp"
  49 #endif


 272     case T_SHORT:
 273     case T_INT:
 274       if (int_args < Argument::n_int_register_parameters_j) {
 275         regs[i].set1(INT_ArgReg[int_args++]->as_VMReg());
 276       } else {
 277         regs[i].set1(VMRegImpl::stack2reg(stk_args));
 278         stk_args += 2;
 279       }
 280       break;
 281     case T_VOID:
 282       // halves of T_LONG or T_DOUBLE
 283       assert(i != 0 && (sig_bt[i - 1] == T_LONG || sig_bt[i - 1] == T_DOUBLE), "expecting half");
 284       regs[i].set_bad();
 285       break;
 286     case T_LONG:
 287       assert((i + 1) < total_args_passed && sig_bt[i + 1] == T_VOID, "expecting half");
 288       // fall through
 289     case T_OBJECT:
 290     case T_ARRAY:
 291     case T_ADDRESS:
 292     case T_VALUETYPE:
 293       if (int_args < Argument::n_int_register_parameters_j) {
 294         regs[i].set2(INT_ArgReg[int_args++]->as_VMReg());
 295       } else {
 296         regs[i].set2(VMRegImpl::stack2reg(stk_args));
 297         stk_args += 2;
 298       }
 299       break;
 300     case T_FLOAT:
 301       if (fp_args < Argument::n_float_register_parameters_j) {
 302         regs[i].set1(FP_ArgReg[fp_args++]->as_VMReg());
 303       } else {
 304         regs[i].set1(VMRegImpl::stack2reg(stk_args));
 305         stk_args += 2;
 306       }
 307       break;
 308     case T_DOUBLE:
 309       assert((i + 1) < total_args_passed && sig_bt[i + 1] == T_VOID, "expecting half");
 310       if (fp_args < Argument::n_float_register_parameters_j) {
 311         regs[i].set2(FP_ArgReg[fp_args++]->as_VMReg());
 312       } else {
 313         regs[i].set2(VMRegImpl::stack2reg(stk_args));
 314         stk_args += 2;
 315       }
 316       break;
 317     default:
 318       ShouldNotReachHere();
 319       break;
 320     }
 321   }
 322 
 323   return align_up(stk_args, 2);
 324 }
 325 
 326 
 327 // const uint SharedRuntime::java_return_convention_max_int = Argument::n_int_register_parameters_j+1;
 328 const uint SharedRuntime::java_return_convention_max_int = 6; 
 329 const uint SharedRuntime::java_return_convention_max_float = Argument::n_float_register_parameters_j;
 330 
 331 int SharedRuntime::java_return_convention(const BasicType *sig_bt, VMRegPair *regs, int total_args_passed) {
 332 
 333   // Create the mapping between argument positions and
 334   // registers.
 335   // r1 and r2 address klasses and states; exclude them from the return convention to avoid collisions
 336 
 337   static const Register INT_ArgReg[java_return_convention_max_int] = {
 338      r0 /* j_rarg7 */, j_rarg6, j_rarg5, j_rarg4, j_rarg3, j_rarg2
 339   };
 340 
 341   static const FloatRegister FP_ArgReg[java_return_convention_max_float] = {
 342     j_farg0, j_farg1, j_farg2, j_farg3, j_farg4, j_farg5, j_farg6, j_farg7
 343   };
 344 
 345   uint int_args = 0;
 346   uint fp_args = 0;
 347 
 348   for (int i = 0; i < total_args_passed; i++) {
 349     switch (sig_bt[i]) {
 350     case T_BOOLEAN:
 351     case T_CHAR:
 352     case T_BYTE:
 353     case T_SHORT:
 354     case T_INT:
 355       if (int_args < java_return_convention_max_int) {
 356         regs[i].set1(INT_ArgReg[int_args]->as_VMReg());
 357         int_args++;
 358       } else {
 359         // Should we have a guarantee here?
 360         return -1;
 361       }
 362       break;
 363     case T_VOID:
 364       // halves of T_LONG or T_DOUBLE
 365       assert(i != 0 && (sig_bt[i - 1] == T_LONG || sig_bt[i - 1] == T_DOUBLE), "expecting half");
 366       regs[i].set_bad();
 367       break;
 368     case T_LONG:
 369       assert((i + 1) < total_args_passed && sig_bt[i + 1] == T_VOID, "expecting half");
 370       // fall through
 371     case T_OBJECT:
 372     case T_ARRAY:
 373     case T_ADDRESS:
 374       // Should T_METADATA be added to java_calling_convention as well?
 375     case T_METADATA:
 376     case T_VALUETYPE:
 377       if (int_args < java_return_convention_max_int) {
 378         regs[i].set2(INT_ArgReg[int_args]->as_VMReg());
 379         int_args++;
 380       } else {
 381         return -1;
 382       }
 383       break;
 384     case T_FLOAT:
 385       if (fp_args < Argument::n_float_register_parameters_j) {
 386         regs[i].set1(FP_ArgReg[fp_args]->as_VMReg());
 387         fp_args++;
 388       } else {
 389         return -1;
 390       }
 391       break;
 392     case T_DOUBLE:
 393       assert((i + 1) < total_args_passed && sig_bt[i + 1] == T_VOID, "expecting half");
 394       if (fp_args < Argument::n_float_register_parameters_j) {
 395         regs[i].set2(FP_ArgReg[fp_args]->as_VMReg());
 396         fp_args++;
 397       } else {
 398         return -1;
 399       }
 400       break;
 401     default:
 402       ShouldNotReachHere();
 403       break;
 404     }
 405   }
 406 
 407   return int_args + fp_args;
 408 }
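// For illustration (hedged): an inline type scalarized as (int a, double b)
// maps a -> r0 and b -> j_farg0 and the function returns 2; when the fields
// no longer fit the return registers it returns -1, signalling that the
// scalarized return convention cannot be used.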
 409 
 410 // Patch the caller's call site with entry to compiled code if it exists.
 411 static void patch_callers_callsite(MacroAssembler *masm) {
 412   Label L;
 413   __ ldr(rscratch1, Address(rmethod, in_bytes(Method::code_offset())));
 414   __ cbz(rscratch1, L);
 415 
 416   __ enter();
 417   __ push_CPU_state();
 418 
 419   // VM needs caller's callsite
 420   // VM needs target method
 421   // This needs to be a long call since we will relocate this adapter to
 422   // the codeBuffer and it may not reach
 423 
 424 #ifndef PRODUCT
 425   assert(frame::arg_reg_save_area_bytes == 0, "not expecting frame reg save area");
 426 #endif
 427 
 428   __ mov(c_rarg0, rmethod);
 429   __ mov(c_rarg1, lr);
 430   __ lea(rscratch1, RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::fixup_callers_callsite)));
 431   __ blrt(rscratch1, 2, 0, 0);
 432   __ maybe_isb();
 433 
 434   __ pop_CPU_state();
 435   // restore sp
 436   __ leave();
 437   __ bind(L);
 438 }
 439 
 440 // For each value type argument, sig includes the list of fields of
 441 // the value type. This utility function computes the number of
 442 // arguments for the call if value types are passed by reference (the
 443 // calling convention the interpreter expects).
 444 static int compute_total_args_passed_int(const GrowableArray<SigEntry>* sig_extended) {
 445   return sig_extended->length();
 448 }
 449 


 450 
 451 static void gen_c2i_adapter_helper(MacroAssembler* masm, BasicType bt, const VMRegPair& reg_pair, int extraspace, const Address& to) {
 452 
 453     // Say 4 args:
 454     // i   st_off
 455     // 0   32 T_LONG
 456     // 1   24 T_VOID
 457     // 2   16 T_OBJECT
 458     // 3    8 T_BOOL
 459     // -    0 return address
 460     //
 461     // However, to make things extra confusing: because we can fit a long/double in
 462     // a single slot on a 64-bit VM and it would be silly to break them up, the
 463     // interpreter leaves one slot empty and only stores to a single slot. In this
 464     // case the slot that is occupied is the T_VOID slot. See, I said it was confusing.
 465 
 466     // int next_off = st_off - Interpreter::stackElementSize;
 467 
 468     VMReg r_1 = reg_pair.first();
 469     VMReg r_2 = reg_pair.second();
 470 
 471     if (!r_1->is_valid()) {
 472       assert(!r_2->is_valid(), "");
 473       return;
 474     }
 475 
 476     if (r_1->is_stack()) {
 477       // memory to memory use rscratch1
 478       // DMS CHECK: words_pushed is always 0 and can be removed?
 479       // int ld_off = (r_1->reg2stack() * VMRegImpl::stack_slot_size + extraspace + words_pushed * wordSize);
 480       int ld_off = (r_1->reg2stack() * VMRegImpl::stack_slot_size + extraspace);
 481       if (!r_2->is_valid()) {
 482         // sign extend??
 483         __ ldrw(rscratch1, Address(sp, ld_off));
 484         __ str(rscratch1, to);
 485 
 486       } else {

 487         __ ldr(rscratch1, Address(sp, ld_off));
 488         __ str(rscratch1, to);
 489       }
 490     } else if (r_1->is_Register()) {
 491       Register r = r_1->as_Register(); 
 492       __ str(r, to);
 493     } else {
 494       assert(r_1->is_FloatRegister(), "");
 495       if (!r_2->is_valid()) {
 496         // only a float; use just part of the slot
 497         __ strs(r_1->as_FloatRegister(), to);
 498       } else {
 499         __ strd(r_1->as_FloatRegister(), to);
 500       }
 501    }
 502 }
 503 
 504 static void gen_c2i_adapter(MacroAssembler *masm,
 505                             const GrowableArray<SigEntry>* sig_extended,
 506                             const VMRegPair *regs,
 507                             Label& skip_fixup,
 508                             address start,
 509                             OopMapSet* oop_maps,
 510                             int& frame_complete,
 511                             int& frame_size_in_words,
 512                             bool alloc_value_receiver) {
 513 
 514   // Before we get into the guts of the C2I adapter, see if we should be here
 515   // at all.  We've come from compiled code and are attempting to jump to the
 516   // interpreter, which means the caller made a static call to get here
 517   // (vcalls always get a compiled target if there is one).  Check for a
 518   // compiled target.  If there is one, we need to patch the caller's call.
 519   patch_callers_callsite(masm);
 520 
 521   __ bind(skip_fixup);
 522 
 523   bool has_value_argument = false;
 524   int words_pushed = 0;
 525 
 526   // Since all args are passed on the stack, total_args_passed *
 527   // Interpreter::stackElementSize is the space we need.
 528 
 529   int total_args_passed = compute_total_args_passed_int(sig_extended);
 530   int extraspace = (total_args_passed * Interpreter::stackElementSize) + wordSize;
 531 
 532   // stack is aligned, keep it that way
 533   extraspace = align_up(extraspace, 2 * wordSize);
 534 
 535   __ mov(r13, sp);
 536 
 537   if (extraspace)
 538     __ sub(sp, sp, extraspace);
 539 
 540   // Now write the args into the outgoing interpreter space
 541 
 542   int ignored = 0, next_vt_arg = 0, next_arg_int = 0;
 543   bool has_oop_field = false;
 544 
 545   for (int next_arg_comp = 0; next_arg_comp < total_args_passed; next_arg_comp++) {
 546     BasicType bt = sig_extended->at(next_arg_comp)._bt;
 547     // offset to start parameters
 548     int st_off   = (total_args_passed - next_arg_int - 1) * Interpreter::stackElementSize;
 549 
 550     if (SigEntry::is_reserved_entry(sig_extended, next_arg_comp)) {
 551       continue; // Ignore reserved entry
 552     }
 553 
 554     if (bt == T_VOID) {
 555       assert(next_arg_comp > 0 && (sig_extended->at(next_arg_comp - 1)._bt == T_LONG || sig_extended->at(next_arg_comp - 1)._bt == T_DOUBLE), "missing half");
 556       next_arg_int++;
 557       continue;
 558     }
 559 
 560     int next_off = st_off - Interpreter::stackElementSize;
 561     int offset = (bt == T_LONG || bt == T_DOUBLE) ? next_off : st_off;
 562 
 563     gen_c2i_adapter_helper(masm, bt, regs[next_arg_comp], extraspace, Address(sp, offset));
 564     next_arg_int++;
 565   }
 566 
 567   // If a value type was allocated and initialized, apply a post barrier to all oop fields
 568   if (has_value_argument && has_oop_field) {
 569     __ push(r13); // save senderSP
 570     __ push(r1); // save callee
 571     // Allocate argument register save area
 572     if (frame::arg_reg_save_area_bytes != 0) {
 573       __ sub(sp, sp, frame::arg_reg_save_area_bytes);
 574     }
 575     __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::apply_post_barriers), rthread, r10);
 576     // De-allocate argument register save area
 577     if (frame::arg_reg_save_area_bytes != 0) {
 578       __ add(sp, sp, frame::arg_reg_save_area_bytes);
 579     }
 580     __ pop(r1); // restore callee
 581     __ pop(r13); // restore sender SP
 582   }
 583 
 584   __ mov(esp, sp); // Interp expects args on caller's expression stack
 585 
 586   __ ldr(rscratch1, Address(rmethod, in_bytes(Method::interpreter_entry_offset())));
 587   __ br(rscratch1);
 588 }
 589 
 590 void SharedRuntime::gen_i2c_adapter(MacroAssembler *masm, int comp_args_on_stack, const GrowableArray<SigEntry>* sig, const VMRegPair *regs) {
 591 
 592 
 593   // Note: r13 contains the senderSP on entry. We must preserve it since
 594   // we may do a i2c -> c2i transition if we lose a race where compiled
 595   // code goes non-entrant while we get args ready.
 596 
 597   // In addition we use r13 to locate all the interpreter args because
 598   // we must align the stack to 16 bytes.
 599 
 600   // Adapters are frameless.
 601 
 602   // An i2c adapter is frameless because the *caller* frame, which is
 603   // interpreted, routinely repairs its own esp (from
 604   // interpreter_frame_last_sp), even if a callee has modified the
 605   // stack pointer.  It also recalculates and aligns sp.
 606 
 607   // A c2i adapter is frameless because the *callee* frame, which is
 608   // interpreted, routinely repairs its caller's sp (from sender_sp,
 609   // which is set up via the senderSP register).
 610 
 611   // In other words, if *either* the caller or callee is interpreted, we can


 631       range_check(masm, rax, r11,
 632                   Interpreter::code()->code_start(), Interpreter::code()->code_end(),
 633                   L_ok);
 634     if (StubRoutines::code1() != NULL)
 635       range_check(masm, rax, r11,
 636                   StubRoutines::code1()->code_begin(), StubRoutines::code1()->code_end(),
 637                   L_ok);
 638     if (StubRoutines::code2() != NULL)
 639       range_check(masm, rax, r11,
 640                   StubRoutines::code2()->code_begin(), StubRoutines::code2()->code_end(),
 641                   L_ok);
 642     const char* msg = "i2c adapter must return to an interpreter frame";
 643     __ block_comment(msg);
 644     __ stop(msg);
 645     __ bind(L_ok);
 646     __ block_comment("} verify_i2ce ");
 647 #endif
 648   }
 649 
 650   // Cut-out for having no stack args.
 651   int comp_words_on_stack = 0;
 652   if (comp_args_on_stack) {
 653     comp_words_on_stack = align_up(comp_args_on_stack * VMRegImpl::stack_slot_size, wordSize) >> LogBytesPerWord;
 654     __ sub(rscratch1, sp, comp_words_on_stack * wordSize);
 655     __ andr(sp, rscratch1, -16);
 656   }
 657 
 658   // Will jump to the compiled code just as if compiled code was doing it.
 659   // Pre-load the register-jump target early, to schedule it better.
 660   __ ldr(rscratch1, Address(rmethod, in_bytes(Method::from_compiled_offset())));
 661 
 662 #if INCLUDE_JVMCI
 663   if (EnableJVMCI || UseAOT) {
 664     // check if this call should be routed towards a specific entry point
 665     __ ldr(rscratch2, Address(rthread, in_bytes(JavaThread::jvmci_alternate_call_target_offset())));
 666     Label no_alternative_target;
 667     __ cbz(rscratch2, no_alternative_target);
 668     __ mov(rscratch1, rscratch2);
 669     __ str(zr, Address(rthread, in_bytes(JavaThread::jvmci_alternate_call_target_offset())));
 670     __ bind(no_alternative_target);
 671   }
 672 #endif // INCLUDE_JVMCI
 673 
 674   int total_args_passed = sig->length();
 675 
 676   // Now generate the shuffle code.
 677   for (int i = 0; i < total_args_passed; i++) {
 678     BasicType bt = sig->at(i)._bt; 
 679 
 680     assert(bt != T_VALUETYPE, "i2c adapter doesn't unpack value args");
 681     if (bt == T_VOID) {
 682       assert(i > 0 && (sig->at(i - 1)._bt == T_LONG || sig->at(i - 1)._bt == T_DOUBLE), "missing half");
 683       continue;
 684     }
 685 
 686     // Pick up 0, 1 or 2 words from SP+offset.
 687     assert(!regs[i].second()->is_valid() || regs[i].first()->next() == regs[i].second(), "scrambled load targets?");
 688 


 689     // Load in argument order going down.
 690     int ld_off = (total_args_passed - i - 1) * Interpreter::stackElementSize;
 691     // Point to interpreter value (vs. tag)
 692     int next_off = ld_off - Interpreter::stackElementSize;
 693     //
 694     //
 695     //
 696     VMReg r_1 = regs[i].first();
 697     VMReg r_2 = regs[i].second();
 698     if (!r_1->is_valid()) {
 699       assert(!r_2->is_valid(), "");
 700       continue;
 701     }
 702     if (r_1->is_stack()) {
 703       // Convert stack slot to an SP offset (+ wordSize to account for return address )
 704       int st_off = regs[i].first()->reg2stack() * VMRegImpl::stack_slot_size;
 705       if (!r_2->is_valid()) {
 706         // sign extend???
 707         __ ldrsw(rscratch2, Address(esp, ld_off));
 708         __ str(rscratch2, Address(sp, st_off));
 709       } else {
 710         //
 711         // We are using two optoregs. This can be either T_OBJECT,
 712         // T_ADDRESS, T_LONG, or T_DOUBLE; the interpreter allocates
 713         // two slots but only uses one for the T_LONG or T_DOUBLE case,
 714         // so we must adjust where to pick up the data to match the
 715         // interpreter.
 716         //
 717         // Interpreter local[n] == MSW, local[n+1] == LSW however locals
 718         // are accessed as negative so LSW is at LOW address
 719 
 720         // ld_off is MSW so get LSW
 721         const int offset = (bt == T_LONG || bt == T_DOUBLE) ? next_off : ld_off;

 722         __ ldr(rscratch2, Address(esp, offset));
 723         // st_off is LSW (i.e. reg.first())
 724         __ str(rscratch2, Address(sp, st_off));
 725       }
 726     } else if (r_1->is_Register()) {  // Register argument
 727       Register r = r_1->as_Register();
 728       if (r_2->is_valid()) {
 729         //
 730         // We are using two VMRegs. This can be either T_OBJECT,
 731         // T_ADDRESS, T_LONG, or T_DOUBLE; the interpreter allocates
 732         // two slots but only uses one for the T_LONG or T_DOUBLE case,
 733         // so we must adjust where to pick up the data to match the
 734         // interpreter.
 735 
 736         const int offset = (bt == T_LONG || bt == T_DOUBLE) ? next_off : ld_off;
 737 
 738         // this can be a misaligned move
 739         __ ldr(r, Address(esp, offset));
 740       } else {
 741         // sign extend and use a full word?
 742         __ ldrw(r, Address(esp, ld_off));
 743       }
 744     } else {
 745       if (!r_2->is_valid()) {
 746         __ ldrs(r_1->as_FloatRegister(), Address(esp, ld_off));
 747       } else {
 748         __ ldrd(r_1->as_FloatRegister(), Address(esp, next_off));
 749       }
 750     }
 751   }
 752 
 753 
 754   // 6243940 We might end up in handle_wrong_method if
 755   // the callee is deoptimized as we race thru here. If that
 756   // happens we don't want to take a safepoint because the
 757   // caller frame will look interpreted and arguments are now
 758   // "compiled" so it is much better to make this transition
 759   // invisible to the stack walking code. Unfortunately if
 760   // we try and find the callee by normal means a safepoint
 761   // is possible. So we stash the desired callee in the thread
 762   // and the vm will find there should this case occur.
 763 
 764   __ str(rmethod, Address(rthread, JavaThread::callee_target_offset()));

 765   __ br(rscratch1);
 766 }
 767 
 768 #ifdef BUILTIN_SIM
 769 static void generate_i2c_adapter_name(char *result, int total_args_passed, const BasicType *sig_bt)
 770 {
 771   strcpy(result, "i2c(");
 772   int idx = 4;
 773   for (int i = 0; i < total_args_passed; i++) {
 774     switch(sig_bt[i]) {
 775     case T_BOOLEAN:
 776       result[idx++] = 'Z';
 777       break;
 778     case T_CHAR:
 779       result[idx++] = 'C';
 780       break;
 781     case T_FLOAT:
 782       result[idx++] = 'F';
 783       break;
 784     case T_DOUBLE:


 813       break;
 814     case T_NARROWOOP:
 815       result[idx++] = 'N';
 816       break;
 817     case T_METADATA:
 818       result[idx++] = 'M';
 819       break;
 820     case T_NARROWKLASS:
 821       result[idx++] = 'K';
 822       break;
 823     default:
 824       result[idx++] = '?';
 825       break;
 826     }
 827   }
 828   result[idx++] = ')';
 829   result[idx] = '\0';
 830 }
 831 #endif
 832 
 833 static void gen_inline_cache_check(MacroAssembler *masm, Label& skip_fixup) {
 834 
 835   Label ok;
 836 
 837   Register holder = rscratch2;
 838   Register receiver = j_rarg0;
 839   Register tmp = r10;  // A call-clobbered register not used for arg passing
 840 
 841   // -------------------------------------------------------------------------
 842   // Generate a C2I adapter.  On entry we know rmethod holds the Method* during calls
 843   // to the interpreter.  The args start out packed in the compiled layout.  They
 844   // need to be unpacked into the interpreter layout.  This will almost always
 845   // require some stack space.  We grow the current (compiled) stack, then repack
 846   // the args.  We finally end in a jump to the generic interpreter entry point.
 847   // On exit from the interpreter, the interpreter will restore our SP (lest the
 848   // compiled code, which relies solely on SP and not FP, get sick).
 849 
 850   {
 851     __ block_comment("c2i_unverified_entry {");
 852     __ load_klass(rscratch1, receiver);
 853     __ ldr(tmp, Address(holder, CompiledICHolder::holder_klass_offset()));
 854     __ cmp(rscratch1, tmp);
 855     __ ldr(rmethod, Address(holder, CompiledICHolder::holder_metadata_offset()));
 856     __ br(Assembler::EQ, ok);
 857     __ far_jump(RuntimeAddress(SharedRuntime::get_ic_miss_stub()));
 858 
 859     __ bind(ok);
 860     // Method might have been compiled since the call site was patched to
 861     // interpreted; if that is the case treat it as a miss so we can get
 862     // the call site corrected.
 863     __ ldr(rscratch1, Address(rmethod, in_bytes(Method::code_offset())));
 864     __ cbz(rscratch1, skip_fixup);
 865     __ far_jump(RuntimeAddress(SharedRuntime::get_ic_miss_stub()));
 866     __ block_comment("} c2i_unverified_entry");
 867   }
 868 

 869 
 870 }
 871 
 872 
 873 
 874 // ---------------------------------------------------------------
 875 AdapterHandlerEntry* SharedRuntime::generate_i2c2i_adapters(MacroAssembler *masm,
 876                                                             int comp_args_on_stack,
 877                                                             const GrowableArray<SigEntry>* sig,
 878                                                             const VMRegPair* regs,
 879                                                             const GrowableArray<SigEntry>* sig_cc,
 880                                                             const VMRegPair* regs_cc,
 881                                                             const GrowableArray<SigEntry>* sig_cc_ro,
 882                                                             const VMRegPair* regs_cc_ro,
 883                                                             AdapterFingerPrint* fingerprint,
 884                                                             AdapterBlob*& new_adapter) {
 885 
 886   address i2c_entry = __ pc();
 887   gen_i2c_adapter(masm, comp_args_on_stack, sig, regs);
 888 
 889   address c2i_unverified_entry = __ pc();
 890   Label skip_fixup;
 891 
 892 
 893   gen_inline_cache_check(masm, skip_fixup);
 894 
 895   OopMapSet* oop_maps = new OopMapSet();
 896   int frame_complete = CodeOffsets::frame_never_safe;
 897   int frame_size_in_words = 0;
 898 
 899   // Scalarized c2i adapter with non-scalarized receiver (i.e., don't pack receiver)
 900   address c2i_value_ro_entry = __ pc();
 901   if (regs_cc != regs_cc_ro) {
 902     Label unused;
 903     gen_c2i_adapter(masm, sig_cc_ro, regs_cc_ro, skip_fixup, i2c_entry, oop_maps, frame_complete, frame_size_in_words, false);
 904     skip_fixup = unused;
 905   }

 906 
 907   // Scalarized c2i adapter
 908   address c2i_entry = __ pc();
 909 
 910   // Not implemented
 911   // BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
 912   // bs->c2i_entry_barrier(masm);
 913 
 914   gen_c2i_adapter(masm, sig_cc, regs_cc, skip_fixup, i2c_entry, oop_maps, frame_complete, frame_size_in_words, true);
 915 
 916   address c2i_unverified_value_entry = c2i_unverified_entry;
 917 
 918   // Non-scalarized c2i adapter
 919   address c2i_value_entry = c2i_entry;
 920   if (regs != regs_cc) {
 921     Label value_entry_skip_fixup;
 922     c2i_unverified_value_entry = __ pc();
 923     gen_inline_cache_check(masm, value_entry_skip_fixup);
 924 
 925     c2i_value_entry = __ pc();
 926     Label unused;
 927     gen_c2i_adapter(masm, sig, regs, value_entry_skip_fixup, i2c_entry, oop_maps, frame_complete, frame_size_in_words, false);
 928   }
 929 
 930   __ flush();
 931 
 932   // The c2i adapter might safepoint and trigger a GC. The caller must make sure that
 933   // the GC knows about the oop argument locations passed to the c2i adapter.
 934 
 935   bool caller_must_gc_arguments = (regs != regs_cc);
 936   new_adapter = AdapterBlob::create(masm->code(), frame_complete, frame_size_in_words + 10, oop_maps, caller_must_gc_arguments);
 937 
 938   return AdapterHandlerLibrary::new_entry(fingerprint, i2c_entry, c2i_entry, c2i_value_entry, c2i_value_ro_entry, c2i_unverified_entry, c2i_unverified_value_entry);
 939 
 940 }
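// How the returned entry is consumed elsewhere (a sketch, assuming the
// standard AdapterHandlerEntry accessors):
//   entry->get_i2c_entry()             // interpreted caller -> compiled callee
//   entry->get_c2i_entry()             // compiled caller -> interpreted callee
//   entry->get_c2i_unverified_entry()  // same, preceded by the inline-cache check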
 941 
 942 int SharedRuntime::c_calling_convention(const BasicType *sig_bt,
 943                                          VMRegPair *regs,
 944                                          VMRegPair *regs2,
 945                                          int total_args_passed) {
 946   assert(regs2 == NULL, "not needed on AArch64");
 947 
 948 // We return the amount of VMRegImpl stack slots we need to reserve for all
 949 // the arguments NOT counting out_preserve_stack_slots.
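// Worked example (illustrative): for a native signature
// (jint, jlong, jfloat, jobject), all four arguments fit in c_rarg/c_farg
// registers, so no stack slots are consumed and the returned slot count is 0.
// A ninth integer argument would instead get stack2reg(stk_args) and bump
// stk_args by 2 (two 32-bit VMRegImpl slots per 64-bit stack word).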
 950 
 951     static const Register INT_ArgReg[Argument::n_int_register_parameters_c] = {
 952       c_rarg0, c_rarg1, c_rarg2, c_rarg3, c_rarg4, c_rarg5,  c_rarg6,  c_rarg7
 953     };
 954     static const FloatRegister FP_ArgReg[Argument::n_float_register_parameters_c] = {
 955       c_farg0, c_farg1, c_farg2, c_farg3,
 956       c_farg4, c_farg5, c_farg6, c_farg7
 957     };
 958 
 959     uint int_args = 0;
 960     uint fp_args = 0;
 961     uint stk_args = 0; // inc by 2 each time
 962 
 963     for (int i = 0; i < total_args_passed; i++) {
 964       switch (sig_bt[i]) {
 965       case T_BOOLEAN:
 966       case T_CHAR:
 967       case T_BYTE:
 968       case T_SHORT:
 969       case T_INT:
 970         if (int_args < Argument::n_int_register_parameters_c) {
 971           regs[i].set1(INT_ArgReg[int_args++]->as_VMReg());
 972         } else {
 973           regs[i].set1(VMRegImpl::stack2reg(stk_args));
 974           stk_args += 2;
 975         }
 976         break;
 977       case T_LONG:
 978         assert((i + 1) < total_args_passed && sig_bt[i + 1] == T_VOID, "expecting half");
 979         // fall through
 980       case T_OBJECT:
 981       case T_ARRAY:
 982       case T_VALUETYPE:
 983       case T_ADDRESS:
 984       case T_METADATA:
 985         if (int_args < Argument::n_int_register_parameters_c) {
 986           regs[i].set2(INT_ArgReg[int_args++]->as_VMReg());
 987         } else {
 988           regs[i].set2(VMRegImpl::stack2reg(stk_args));
 989           stk_args += 2;
 990         }
 991         break;
 992       case T_FLOAT:
 993         if (fp_args < Argument::n_float_register_parameters_c) {
 994           regs[i].set1(FP_ArgReg[fp_args++]->as_VMReg());
 995         } else {
 996           regs[i].set1(VMRegImpl::stack2reg(stk_args));
 997           stk_args += 2;
 998         }
 999         break;
1000       case T_DOUBLE:
1001         assert((i + 1) < total_args_passed && sig_bt[i + 1] == T_VOID, "expecting half");
1002         if (fp_args < Argument::n_float_register_parameters_c) {


1839       reg_destroyed[out_regs[c_arg].first()->as_Register()->encoding()] = true;
1840     } else if (out_regs[c_arg].first()->is_FloatRegister()) {
1841       freg_destroyed[out_regs[c_arg].first()->as_FloatRegister()->encoding()] = true;
1842     }
1843 #endif /* ASSERT */
1844     switch (in_sig_bt[i]) {
1845       case T_ARRAY:
1846         if (is_critical_native) {
1847           unpack_array_argument(masm, in_regs[i], in_elem_bt[i], out_regs[c_arg + 1], out_regs[c_arg]);
1848           c_arg++;
1849 #ifdef ASSERT
1850           if (out_regs[c_arg].first()->is_Register()) {
1851             reg_destroyed[out_regs[c_arg].first()->as_Register()->encoding()] = true;
1852           } else if (out_regs[c_arg].first()->is_FloatRegister()) {
1853             freg_destroyed[out_regs[c_arg].first()->as_FloatRegister()->encoding()] = true;
1854           }
1855 #endif
1856           int_args++;
1857           break;
1858         }
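        // fall through: a non-critical-native T_ARRAY is handled as an ordinary oop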
1859       case T_VALUETYPE:
1860       case T_OBJECT:
1861         assert(!is_critical_native, "no oop arguments");
1862         object_move(masm, map, oop_handle_offset, stack_slots, in_regs[i], out_regs[c_arg],
1863                     ((i == 0) && (!is_static)),
1864                     &receiver_offset);
1865         int_args++;
1866         break;
1867       case T_VOID:
1868         break;
1869 
1870       case T_FLOAT:
1871         float_move(masm, in_regs[i], out_regs[c_arg]);
1872         float_args++;
1873         break;
1874 
1875       case T_DOUBLE:
1876         assert( i + 1 < total_in_args &&
1877                 in_sig_bt[i + 1] == T_VOID &&
1878                 out_sig_bt[c_arg+1] == T_VOID, "bad arg list");
1879         double_move(masm, in_regs[i], out_regs[c_arg]);


2021   }
2022 
2023   // Now set thread in native
2024   __ mov(rscratch1, _thread_in_native);
2025   __ lea(rscratch2, Address(rthread, JavaThread::thread_state_offset()));
2026   __ stlrw(rscratch1, rscratch2);
2027 
2028   {
2029     int return_type = 0;
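    // Encode the return kind for rt_call below:
    // 0 = void, 1 = integral or oop (r0), 2 = float (v0), 3 = double (v0).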
2030     switch (ret_type) {
 2031     case T_VOID:
 2032       return_type = 0; break;
2033     case T_CHAR:
2034     case T_BYTE:
2035     case T_SHORT:
2036     case T_INT:
2037     case T_BOOLEAN:
2038     case T_LONG:
2039       return_type = 1; break;
2040     case T_ARRAY:
2041     case T_VALUETYPE:
2042     case T_OBJECT:
2043       return_type = 1; break;
2044     case T_FLOAT:
2045       return_type = 2; break;
2046     case T_DOUBLE:
2047       return_type = 3; break;
2048     default:
2049       ShouldNotReachHere();
2050     }
2051     rt_call(masm, native_func,
2052             int_args + 2, // AArch64 passes up to 8 args in int registers
2053             float_args,   // and up to 8 float args
2054             return_type);
2055   }
2056 
2057   __ bind(native_return);
2058 
2059   intptr_t return_pc = (intptr_t) __ pc();
2060   oop_maps->add_gc_map(return_pc - start, map);
2061 
2062   // Unpack native results.
2063   switch (ret_type) {
2064   case T_BOOLEAN: __ c2bool(r0);                     break;
2065   case T_CHAR   : __ ubfx(r0, r0, 0, 16);            break;
2066   case T_BYTE   : __ sbfx(r0, r0, 0, 8);             break;
2067   case T_SHORT  : __ sbfx(r0, r0, 0, 16);            break;
2068   case T_INT    : __ sbfx(r0, r0, 0, 32);            break;
2069   case T_DOUBLE :
2070   case T_FLOAT  :
 2071     // Result is in v0; we'll save it as needed
2072     break;
2073   case T_ARRAY:                 // Really a handle
2074   case T_VALUETYPE:
2075   case T_OBJECT:                // Really a handle
2076       break; // can't de-handlize until after safepoint check
2077   case T_VOID: break;
2078   case T_LONG: break;
2079   default       : ShouldNotReachHere();
2080   }
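  // C equivalents of the bitfield extracts above (for reference):
  // sbfx(r0, r0, 0, 8) behaves like (int64_t)(int8_t)r0 (sign-extend byte),
  // while ubfx(r0, r0, 0, 16) behaves like (uint64_t)(uint16_t)r0
  // (zero-extend, as required for Java char).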
2081 
2082   // Switch thread to "native transition" state before reading the synchronization state.
2083   // This additional state is necessary because reading and testing the synchronization
2084   // state is not atomic w.r.t. GC, as this scenario demonstrates:
2085   //     Java thread A, in _thread_in_native state, loads _not_synchronized and is preempted.
2086   //     VM thread changes sync state to synchronizing and suspends threads for GC.
2087   //     Thread A is resumed to finish this native method, but doesn't block here since it
 2088     //     didn't see any synchronization in progress, and escapes.
2089   __ mov(rscratch1, _thread_in_native_trans);
2090 
2091   __ strw(rscratch1, Address(rthread, JavaThread::thread_state_offset()));
2092 
2093   // Force this write out before the read below
2094   __ dmb(Assembler::ISH);
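  // The store + full barrier pattern above corresponds roughly to this C++11
  // shape (an analogy only, not the VM's actual code):
  //
  //   thread_state.store(_thread_in_native_trans, std::memory_order_relaxed);
  //   std::atomic_thread_fence(std::memory_order_seq_cst);  // ~ dmb ish
  //   bool do_block = safepoint_pending.load(std::memory_order_relaxed);
  //
  // i.e. the state write must be globally visible before the safepoint poll
  // is read.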


2159     __ bind(unlock_done);
2160     if (ret_type != T_FLOAT && ret_type != T_DOUBLE && ret_type != T_VOID) {
2161       restore_native_result(masm, ret_type, stack_slots);
2162     }
2163 
2164     __ bind(done);
2165   }
2166 
2167   Label dtrace_method_exit, dtrace_method_exit_done;
2168   {
2169     unsigned long offset;
2170     __ adrp(rscratch1, ExternalAddress((address)&DTraceMethodProbes), offset);
2171     __ ldrb(rscratch1, Address(rscratch1, offset));
2172     __ cbnzw(rscratch1, dtrace_method_exit);
2173     __ bind(dtrace_method_exit_done);
2174   }
2175 
2176   __ reset_last_Java_frame(false);
2177 
2178   // Unbox oop result, e.g. JNIHandles::resolve result.
2179   if (ret_type == T_OBJECT || ret_type == T_ARRAY || ret_type == T_VALUETYPE) {
2180     __ resolve_jobject(r0, rthread, rscratch2);
2181   }
2182 
2183   if (CheckJNICalls) {
2184     // clear_pending_jni_exception_check
2185     __ str(zr, Address(rthread, JavaThread::pending_jni_exception_check_fn_offset()));
2186   }
2187 
2188   if (!is_critical_native) {
2189     // reset handle block
2190     __ ldr(r2, Address(rthread, JavaThread::active_handles_offset()));
2191     __ str(zr, Address(r2, JNIHandleBlock::top_offset_in_bytes()));
2192   }
2193 
2194   __ leave();
2195 
2196   if (!is_critical_native) {
2197     // Any exception pending?
2198     __ ldr(rscratch1, Address(rthread, in_bytes(Thread::pending_exception_offset())));
2199     __ cbnz(rscratch1, exception_pending);


3315   __ str(zr, Address(rthread, JavaThread::exception_handler_pc_offset()));
3316   __ str(zr, Address(rthread, JavaThread::exception_pc_offset()));
3317 #endif
3318   // Clear the exception oop so GC no longer processes it as a root.
3319   __ str(zr, Address(rthread, JavaThread::exception_oop_offset()));
3320 
3321   // r0: exception oop
3322   // r8:  exception handler
3323   // r4: exception pc
3324   // Jump to handler
3325 
3326   __ br(r8);
3327 
3328   // Make sure all code is generated
3329   masm->flush();
3330 
3331   // Set exception blob
3332   _exception_blob =  ExceptionBlob::create(&buffer, oop_maps, SimpleRuntimeFrame::framesize >> 1);
3333 }
3334 #endif // COMPILER2_OR_JVMCI
3335 
3336 BufferedValueTypeBlob* SharedRuntime::generate_buffered_value_type_adapter(const ValueKlass* vk) {
3337   BufferBlob* buf = BufferBlob::create("value types pack/unpack", 16 * K);
3338   CodeBuffer buffer(buf);
3339   short buffer_locs[20];
3340   buffer.insts()->initialize_shared_locs((relocInfo*)buffer_locs,
3341                                          sizeof(buffer_locs)/sizeof(relocInfo));
3342 
3343   MacroAssembler _masm(&buffer);
3344   MacroAssembler* masm = &_masm;
3345 
3346   const Array<SigEntry>* sig_vk = vk->extended_sig();
3347   const Array<VMRegPair>* regs = vk->return_regs();
3348 
3349   int pack_fields_off = __ offset();
3350 
3351   int j = 1;
3352   for (int i = 0; i < sig_vk->length(); i++) {
3353     BasicType bt = sig_vk->at(i)._bt;
3354     if (bt == T_VALUETYPE) {
3355       continue;
3356     }
3357     if (bt == T_VOID) {
3358       if (sig_vk->at(i-1)._bt == T_LONG ||
3359           sig_vk->at(i-1)._bt == T_DOUBLE) {
3360         j++;
3361       }
3362       continue;
3363     }
3364     int off = sig_vk->at(i)._offset;
3365     VMRegPair pair = regs->at(j);
3366     VMReg r_1 = pair.first();
3367     VMReg r_2 = pair.second();
3368     Address to(r0, off);
 3369     if (bt == T_FLOAT) {
3370       __ strs(r_1->as_FloatRegister(), to);
3371     } else if (bt == T_DOUBLE) {
3372       __ strd(r_1->as_FloatRegister(), to);
3373     } else if (bt == T_OBJECT || bt == T_ARRAY) {
3374       Register val = r_1->as_Register();
3375       assert_different_registers(r0, val);
3376       // We don't need barriers because the destination is a newly allocated object.
3377       // Also, we cannot use store_heap_oop(to, val) because it uses r8 as tmp.
 3378       if (UseCompressedOops) {
 3379         __ encode_heap_oop(val);
 3380         __ strw(val, to); // narrow oop needs a 32-bit store
 3381       } else {
 3382         __ str(val, to);
 3383       }
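      // Assuming the usual compressed-oops encoding (names illustrative; the
      // exact helpers differ by JDK version), encode_heap_oop computes roughly
      //   narrow = (uint32_t)((addr - heap_base) >> oop_shift)
      // which is why the compressed branch only needs a 32-bit store.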
3384     } else {
3385       assert(is_java_primitive(bt), "unexpected basic type");
3386       assert_different_registers(r0, r_1->as_Register());
3387       size_t size_in_bytes = type2aelembytes(bt);
3388       __ store_sized_value(to, r_1->as_Register(), size_in_bytes);
3389     }
3390     j++;
3391   }
3392   assert(j == regs->length(), "missed a field?");
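  // Example walk (hypothetical value type { jint i; jlong l; }): the extended
  // signature would look like [T_VALUETYPE, T_INT, T_LONG, T_VOID].  The
  // T_VALUETYPE marker is skipped, T_INT stores from regs->at(1), T_LONG from
  // regs->at(2), and the trailing T_VOID half bumps j past the long's second
  // half, so the assert above holds.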
3393 
3394   __ ret(lr);
3395 
3396   int unpack_fields_off = __ offset();
3397 
3398   j = 1;
3399   for (int i = 0; i < sig_vk->length(); i++) {
3400     BasicType bt = sig_vk->at(i)._bt;
3401     if (bt == T_VALUETYPE) {
3402       continue;
3403     }
3404     if (bt == T_VOID) {
3405       if (sig_vk->at(i-1)._bt == T_LONG ||
3406           sig_vk->at(i-1)._bt == T_DOUBLE) {
3407         j++;
3408       }
3409       continue;
3410     }
3411     int off = sig_vk->at(i)._offset;
3412     VMRegPair pair = regs->at(j);
3413     VMReg r_1 = pair.first();
3414     VMReg r_2 = pair.second();
3415     Address from(r0, off);
3416     if (bt == T_FLOAT) {
3417       __ ldrs(r_1->as_FloatRegister(), from);
3418     } else if (bt == T_DOUBLE) {
3419       __ ldrd(r_1->as_FloatRegister(), from);
3420     } else if (bt == T_OBJECT || bt == T_ARRAY) {
3421        assert_different_registers(r0, r_1->as_Register());
3422        __ load_heap_oop(r_1->as_Register(), from);
3423     } else {
3424       assert(is_java_primitive(bt), "unexpected basic type");
3425       assert_different_registers(r0, r_1->as_Register());
3426 
3427       size_t size_in_bytes = type2aelembytes(bt);
3428       __ load_sized_value(r_1->as_Register(), from, size_in_bytes, bt != T_CHAR && bt != T_BOOLEAN);
3429     }
3430     j++;
3431   }
3432   assert(j == regs->length(), "missed a field?");
3433 
3434   __ ret(lr);
3435 
3436   __ flush();
3437 
3438   return BufferedValueTypeBlob::create(&buffer, pack_fields_off, unpack_fields_off);
3439 }