
  // Unspill the temp. registers:
  pop(pushed_registers, sp);

  br(Assembler::NE, *L_failure);

  // Success. Cache the super we found and proceed in triumph.
  str(super_klass, super_cache_addr);

  if (L_success != &L_fallthrough) {
    b(*L_success);
  }

#undef IS_A_TEMP

  bind(L_fallthrough);
}


void MacroAssembler::verify_oop(Register reg, const char* s) {
  if (!VerifyOops || VerifyAdapterSharing) {
    // Below address of the code string confuses VerifyAdapterSharing
    // because it may differ between otherwise equivalent adapters.
    return;
  }

  // Pass register number to verify_oop_subroutine
  const char* b = NULL;
  {
    ResourceMark rm;
    stringStream ss;
    ss.print("verify_oop: %s: %s", reg->name(), s);
    b = code_string(ss.as_string());
  }
  BLOCK_COMMENT("verify_oop {");

  stp(r0, rscratch1, Address(pre(sp, -2 * wordSize)));
  stp(rscratch2, lr, Address(pre(sp, -2 * wordSize)));

  mov(r0, reg);
  mov(rscratch1, (address)b);

  // call indirectly to solve generation ordering problem
  lea(rscratch2, ExternalAddress(StubRoutines::verify_oop_subroutine_entry_address()));
  ldr(rscratch2, Address(rscratch2));
  blr(rscratch2);

  ldp(rscratch2, lr, Address(post(sp, 2 * wordSize)));
  ldp(r0, rscratch1, Address(post(sp, 2 * wordSize)));

  BLOCK_COMMENT("} verify_oop");
}

void MacroAssembler::verify_oop_addr(Address addr, const char* s) {
  if (!VerifyOops || VerifyAdapterSharing) {
    // Below address of the code string confuses VerifyAdapterSharing
    // because it may differ between otherwise equivalent adapters.
    return;
  }

  const char* b = NULL;
  {
    ResourceMark rm;
    stringStream ss;
    ss.print("verify_oop_addr: %s", s);
    b = code_string(ss.as_string());
  }
  BLOCK_COMMENT("verify_oop_addr {");

  stp(r0, rscratch1, Address(pre(sp, -2 * wordSize)));
  stp(rscratch2, lr, Address(pre(sp, -2 * wordSize)));

  // addr may contain sp so we will have to adjust it based on the
  // pushes that we just did.
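  // (The two stp instructions above each pre-decrement sp by 2 * wordSize,
  //  so an sp-relative address is now 4 * wordSize further from the top of
  //  the stack; the 4 * wordSize offset below compensates for that.)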
  if (addr.uses(sp)) {
    lea(r0, addr);
    ldr(r0, Address(r0, 4 * wordSize));
  } else {
    ldr(r0, addr);
// ...

void MacroAssembler::call_VM_leaf(address entry_point, Register arg_0) {
  pass_arg0(this, arg_0);
  call_VM_leaf_base(entry_point, 1);
}

void MacroAssembler::call_VM_leaf(address entry_point, Register arg_0, Register arg_1) {
  pass_arg0(this, arg_0);
  pass_arg1(this, arg_1);
  call_VM_leaf_base(entry_point, 2);
}

void MacroAssembler::call_VM_leaf(address entry_point, Register arg_0,
                                  Register arg_1, Register arg_2) {
  pass_arg0(this, arg_0);
  pass_arg1(this, arg_1);
  pass_arg2(this, arg_2);
  call_VM_leaf_base(entry_point, 3);
}

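// The super_call_VM_leaf variants below invoke call_VM_leaf_base through an
// explicit MacroAssembler:: qualification; a qualified call bypasses virtual
// dispatch, so a subclass override of call_VM_leaf_base (if any) is not used
// for these calls.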
void MacroAssembler::super_call_VM_leaf(address entry_point) {
  MacroAssembler::call_VM_leaf_base(entry_point, 1);
}

void MacroAssembler::super_call_VM_leaf(address entry_point, Register arg_0) {
  pass_arg0(this, arg_0);
  MacroAssembler::call_VM_leaf_base(entry_point, 1);
}

void MacroAssembler::super_call_VM_leaf(address entry_point, Register arg_0, Register arg_1) {

  assert(arg_0 != c_rarg1, "smashed arg");
  pass_arg1(this, arg_1);
  pass_arg0(this, arg_0);
  MacroAssembler::call_VM_leaf_base(entry_point, 2);
}

void MacroAssembler::super_call_VM_leaf(address entry_point, Register arg_0, Register arg_1, Register arg_2) {
  assert(arg_0 != c_rarg2, "smashed arg");
  assert(arg_1 != c_rarg2, "smashed arg");
  pass_arg2(this, arg_2);
  assert(arg_0 != c_rarg1, "smashed arg");
  pass_arg1(this, arg_1);
  pass_arg0(this, arg_0);
// ...
  assert(arg_1 != c_rarg2, "smashed arg");
  pass_arg2(this, arg_2);
  assert(arg_0 != c_rarg1, "smashed arg");
  pass_arg1(this, arg_1);
  pass_arg0(this, arg_0);
  MacroAssembler::call_VM_leaf_base(entry_point, 4);
}

void MacroAssembler::null_check(Register reg, int offset) {
  if (needs_explicit_null_check(offset)) {
    // provoke OS NULL exception if reg = NULL by
    // accessing M[reg] w/o changing any registers
    // NOTE: this is plenty to provoke a segv
    ldr(zr, Address(reg));
  } else {
    // nothing to do, (later) access of M[reg + offset]
    // will provoke OS NULL exception if reg = NULL
  }
}

void MacroAssembler::test_klass_is_value(Register klass, Register temp_reg, Label& is_value) {
  ldrw(temp_reg, Address(klass, Klass::access_flags_offset()));
  andr(temp_reg, temp_reg, JVM_ACC_VALUE);
  cbnz(temp_reg, is_value);
}

void MacroAssembler::test_field_is_flattenable(Register flags, Register temp_reg, Label& is_flattenable) {
  (void) temp_reg; // keep signature uniform with x86
  tbnz(flags, ConstantPoolCacheEntry::is_flattenable_field_shift, is_flattenable);
}

void MacroAssembler::test_field_is_not_flattenable(Register flags, Register temp_reg, Label& not_flattenable) {
  (void) temp_reg; // keep signature uniform with x86
  tbz(flags, ConstantPoolCacheEntry::is_flattenable_field_shift, not_flattenable);
}

void MacroAssembler::test_field_is_flattened(Register flags, Register temp_reg, Label& is_flattened) {
  (void) temp_reg; // keep signature uniform with x86
  tbnz(flags, ConstantPoolCacheEntry::is_flattened_field_shift, is_flattened);
}

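// A flattened (value type element) array is identified by the array tag bits
// of the klass layout helper: shifting right by _lh_array_tag_shift leaves
// just the tag, which is compared against _lh_array_tag_vt_value below.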
void MacroAssembler::test_flat_array_klass(Register klass, Register temp_reg, Label& is_flattened) {
  ldrw(temp_reg, Address(klass, Klass::layout_helper_offset()));
  asrw(temp_reg, temp_reg, Klass::_lh_array_tag_shift);
  cmpw(temp_reg, Klass::_lh_array_tag_vt_value);
  br(Assembler::EQ, is_flattened);
}

void MacroAssembler::test_flat_array_oop(Register oop, Register temp_reg, Label& is_flattened) {
  load_klass(temp_reg, oop);
  test_flat_array_klass(temp_reg, temp_reg, is_flattened);
}

// MacroAssembler protected routines needed to implement
// public methods

void MacroAssembler::mov(Register r, Address dest) {
  code_section()->relocate(pc(), dest.rspec());
  u_int64_t imm64 = (u_int64_t)dest.target();
  movptr(r, imm64);
}

// Move a constant pointer into r. In AArch64 mode the virtual
// address space is 48 bits in size, so we only need three
// instructions to create a patchable instruction sequence that can
// reach anywhere.
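//
// As a rough sketch (not necessarily the exact emission below), a 48-bit
// immediate can be materialized with one movz and two movk instructions,
// 16 bits at a time:
//
//   movz(r, imm64 & 0xffff);
//   movk(r, (imm64 >> 16) & 0xffff, 16);
//   movk(r, (imm64 >> 32) & 0xffff, 32);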
void MacroAssembler::movptr(Register r, uintptr_t imm64) {
#ifndef PRODUCT
  {
    char buffer[64];
    snprintf(buffer, sizeof(buffer), "0x%" PRIX64, imm64);
    block_comment(buffer);
  }
// ...

// get_thread() can be called anywhere inside generated code so we
// need to save whatever non-callee save context might get clobbered
// by the call to JavaThread::aarch64_get_thread_helper() or, indeed,
// the call setup code.
//
// aarch64_get_thread_helper() clobbers only r0, r1, and flags.
//
void MacroAssembler::get_thread(Register dst) {
  RegSet saved_regs = RegSet::range(r0, r1) + lr - dst;
  push(saved_regs, sp);

  mov(lr, CAST_FROM_FN_PTR(address, JavaThread::aarch64_get_thread_helper));
  blrt(lr, 1, 0, 1);
  if (dst != c_rarg0) {
    mov(dst, c_rarg0);
  }

  pop(saved_regs, sp);
}

// C2 compiled method's prolog code
// Moved here from aarch64.ad to support Valhalla code below
void MacroAssembler::verified_entry(Compile* C, int sp_inc) {

  // n.b. frame size includes space for return pc and rfp
  const long framesize = C->frame_size_in_bytes();
  assert(framesize % (2 * wordSize) == 0, "must preserve 2 * wordSize alignment");

  // insert a nop at the start of the prolog so we can patch in a
  // branch if we need to invalidate the method later
  nop();

  int bangsize = C->bang_size_in_bytes();
  if (C->need_stack_bang(bangsize) && UseStackBanging)
    generate_stack_overflow_check(bangsize);

  build_frame(framesize);

  if (NotifySimulator) {
    notify(Assembler::method_entry);
  }

  if (VerifyStackAtCalls) {
    Unimplemented();
  }
}


// DMS TODO: Need extra eyes to bring code below to good shape.
//
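// Summary of the approach taken below: compute the unscalarized Java calling
// convention first, then the scalarized one from the adapter's sig_cc; extend
// sp if the scalarized form needs extra stack slots; mark every source
// register/stack slot read-only in a reg_state table; then move/unpack the
// arguments in reverse order, retrying and eventually spilling to a scratch
// register (r14) until every move succeeds.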
void MacroAssembler::unpack_value_args(Compile* C, bool receiver_only) {

  assert(C->has_scalarized_args(), "value type argument scalarization is disabled");
  Method* method = C->method()->get_Method();
  const GrowableArray<SigEntry>* sig_cc = method->adapter()->get_sig_cc();
  assert(sig_cc != NULL, "must have scalarized signature");

  // Get unscalarized calling convention
  BasicType* sig_bt = NEW_RESOURCE_ARRAY(BasicType, sig_cc->length());
  int args_passed = 0;
  if (!method->is_static()) {
    sig_bt[args_passed++] = T_OBJECT;
  }
  if (!receiver_only) {
    for (SignatureStream ss(method->signature()); !ss.at_return_type(); ss.next()) {
      BasicType bt = ss.type();
      sig_bt[args_passed++] = bt;
      if (type2size[bt] == 2) {
        sig_bt[args_passed++] = T_VOID;
      }
    }
  } else {
    // Only unpack the receiver, all other arguments are already scalarized
    InstanceKlass* holder = method->method_holder();
    int rec_len = holder->is_value() ? ValueKlass::cast(holder)->extended_sig()->length() : 1;
    // Copy scalarized signature but skip receiver, value type delimiters and reserved entries
    for (int i = 0; i < sig_cc->length(); i++) {
      if (!SigEntry::is_reserved_entry(sig_cc, i)) {
        if (SigEntry::skip_value_delimiters(sig_cc, i) && rec_len <= 0) {
          sig_bt[args_passed++] = sig_cc->at(i)._bt;
        }
        rec_len--;
      }
    }
  }

  VMRegPair* regs = NEW_RESOURCE_ARRAY(VMRegPair, args_passed);
  int args_on_stack = SharedRuntime::java_calling_convention(sig_bt, regs, args_passed, false);

  // Get scalarized calling convention
  int args_passed_cc = SigEntry::fill_sig_bt(sig_cc, sig_bt);
  VMRegPair* regs_cc = NEW_RESOURCE_ARRAY(VMRegPair, sig_cc->length());
  int args_on_stack_cc = SharedRuntime::java_calling_convention(sig_bt, regs_cc, args_passed_cc, false);

  // Check if we need to extend the stack for unpacking
  int sp_inc = (args_on_stack_cc - args_on_stack) * VMRegImpl::stack_slot_size;
  if (sp_inc > 0) {
    // Save the return address, adjust the stack (make sure it is properly
    // 16-byte aligned) and copy the return address to the new top of the stack.
    // pop(r13);
    sp_inc = align_up(sp_inc, StackAlignmentInBytes);
    // DMS CHECK: subptr(rsp, sp_inc);
    sub(sp, sp, sp_inc);
    // push(r13);
  } else {
    // The scalarized calling convention needs less stack space than the unscalarized one.
    // No need to extend the stack, the caller will take care of these adjustments.
    sp_inc = 0;
  }

  // Initialize register/stack slot states (make all writable)
  int max_stack = MAX2(args_on_stack + sp_inc/VMRegImpl::stack_slot_size, args_on_stack_cc);
  int max_reg = VMRegImpl::stack2reg(max_stack)->value();
  RegState* reg_state = NEW_RESOURCE_ARRAY(RegState, max_reg);
  for (int i = 0; i < max_reg; ++i) {
    reg_state[i] = reg_writable;
  }
  // Set all source registers/stack slots to readonly to prevent accidental overwriting
  for (int i = 0; i < args_passed; ++i) {
    VMReg reg = regs[i].first();
    if (!reg->is_valid()) continue;
    if (reg->is_stack()) {
      // Update source stack location by adding stack increment
      reg = VMRegImpl::stack2reg(reg->reg2stack() + sp_inc/VMRegImpl::stack_slot_size);
      regs[i] = reg;
    }
    assert(reg->value() >= 0 && reg->value() < max_reg, "reg value out of bounds");
    reg_state[reg->value()] = reg_readonly;
  }


  // Emit code for unpacking value type arguments
  // We try multiple times and eventually start spilling to resolve (circular) dependencies
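  // Example of such a dependency (hypothetical registers): if one argument
  // must move from r1 to r2 while another must move from r2 to r1, neither
  // move can go first without clobbering the other's source; spilling one of
  // them to the scratch register breaks the cycle.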
  bool done = false;
  for (int i = 0; i < 2 * args_passed_cc && !done; ++i) {
    done = true;
    bool spill = (i > args_passed_cc); // Start spilling?
    // Iterate over all arguments (in reverse)
    for (int from_index = args_passed - 1, to_index = args_passed_cc - 1, sig_index = sig_cc->length() - 1; sig_index >= 0; sig_index--) {
      if (SigEntry::is_reserved_entry(sig_cc, sig_index)) {
        to_index--; // Skip reserved entry
      } else {
        assert(from_index >= 0, "index out of bounds");
        VMReg reg = regs[from_index].first();
        if (spill && reg->is_valid() && reg_state[reg->value()] == reg_readonly) {
          // Spill argument to be able to write the source and resolve circular dependencies
          VMReg spill_reg = r14->as_VMReg();
          bool res = move_helper(reg, spill_reg, T_DOUBLE, reg_state, sp_inc);
          assert(res, "Spilling should not fail");
          // Set spill_reg as new source and update state
          reg = spill_reg;
          regs[from_index].set1(reg);
          reg_state[reg->value()] = reg_readonly;
          spill = false; // Do not spill again in this round
        }
        BasicType bt = sig_cc->at(sig_index)._bt;
        if (SigEntry::skip_value_delimiters(sig_cc, sig_index)) {
          assert(to_index >= 0, "index out of bounds");
          done &= move_helper(reg, regs_cc[to_index].first(), bt, reg_state, sp_inc);
          to_index--;
        } else if (!receiver_only || (from_index == 0 && bt == T_VOID)) {
          done &= unpack_value_helper(sig_cc, sig_index, reg, regs_cc, to_index, reg_state, sp_inc);
        } else {
          continue;
        }
        from_index--;
      }
    }
  }
  guarantee(done, "Could not resolve circular dependency when unpacking value type arguments");

  // Emit code for verified entry and save increment for stack repair on return
  verified_entry(C, sp_inc);
}

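// Moves a value from one VMReg (register or stack slot) to another, honoring
// the reg_state table: a read-only destination refuses the move (returns
// false), an already-written destination is a no-op, and a successful move
// marks the source writable and the destination written.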
bool MacroAssembler::move_helper(VMReg from, VMReg to, BasicType bt, RegState reg_state[], int ret_off) {
  if (reg_state[to->value()] == reg_written) {
    return true; // Already written
  }
  if (from != to && bt != T_VOID) {
    if (reg_state[to->value()] == reg_readonly) {
      return false; // Not yet writable
    }
    if (from->is_reg()) {
      if (to->is_reg()) {
        mov(to->as_Register(), from->as_Register());
      } else {
        int st_off = to->reg2stack() * VMRegImpl::stack_slot_size + wordSize;
        assert(st_off != ret_off, "overwriting return address at %d", st_off);
        Address to_addr = Address(sp, st_off);
        str(from->as_Register(), to_addr);
      }
    } else {
      Address from_addr = Address(sp, from->reg2stack() * VMRegImpl::stack_slot_size + wordSize);
      if (to->is_reg()) {
        ldr(to->as_Register(), from_addr);
      } else {
        int st_off = to->reg2stack() * VMRegImpl::stack_slot_size + wordSize;
        assert(st_off != ret_off, "overwriting return address at %d", st_off);
        ldr(rscratch1, from_addr);
        str(rscratch1, Address(sp, st_off));
      }
    }
  }
  // Update register states
  reg_state[from->value()] = reg_writable;
  reg_state[to->value()] = reg_written;
  return true;
}

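// Unpacks the fields of one value type argument: starting at the end
// delimiter in the scalarized signature, it walks backwards over the field
// entries and loads each field from the source object (in 'from', or loaded
// from a stack slot into r10) into its destination register or stack slot
// from regs_to, tracking progress through reg_state.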
bool MacroAssembler::unpack_value_helper(const GrowableArray<SigEntry>* sig, int& sig_index, VMReg from, VMRegPair* regs_to, int& to_index, RegState reg_state[], int ret_off) {
  Register fromReg = from->is_reg() ? from->as_Register() : noreg;
  assert(sig->at(sig_index)._bt == T_VOID, "should be at end delimiter");

  int vt = 1;
  bool done = true;
  bool mark_done = true;
  do {
    sig_index--;
    BasicType bt = sig->at(sig_index)._bt;
    if (bt == T_VALUETYPE) {
      vt--;
    } else if (bt == T_VOID && sig->at(sig_index-1)._bt != T_LONG && sig->at(sig_index-1)._bt != T_DOUBLE) {
      vt++;
    } else if (SigEntry::is_reserved_entry(sig, sig_index)) {
      to_index--; // Ignore this
    } else {

      assert(to_index >= 0, "invalid to_index");
      VMRegPair pair_to = regs_to[to_index--];
      VMReg r_1 = pair_to.first();
      VMReg r_2 = pair_to.second();

      if (bt == T_VOID) continue;

      int idx = (int) r_1->value();
      if (reg_state[idx] == reg_readonly) {
        if (idx != from->value()) {
          mark_done = false;
        }
        done = false;
        continue;
      } else if (reg_state[idx] == reg_written) {
        continue;
      } else {
        assert(reg_state[idx] == reg_writable, "must be writable");
        reg_state[idx] = reg_written;
      }

      if (fromReg == noreg) {
        int st_off = from->reg2stack() * VMRegImpl::stack_slot_size + wordSize;
        ldr(r10, Address(sp, st_off));
        fromReg = r10;
      }

      int off = sig->at(sig_index)._offset;
      assert(off > 0, "offset in object should be positive");

      Address fromAddr = Address(fromReg, off);

      if (r_1->is_stack()) {
        // Convert stack slot to an SP offset (+ wordSize to account for return address)
        int st_off = r_1->reg2stack() * VMRegImpl::stack_slot_size;
        if (!r_2->is_valid()) {
          // sign extend???
          ldrsw(rscratch2, fromAddr);
          str(rscratch2, Address(sp, st_off));
        } else {
          ldr(rscratch2, fromAddr);
          str(rscratch2, Address(sp, st_off));
        }
      } else if (r_1->is_Register()) { // Register argument
        Register r = r_1->as_Register();
        if (r_2->is_valid()) {
          ldr(r, fromAddr);
        } else {
          ldrw(r, fromAddr);
        }
      } else {
        if (!r_2->is_valid()) {
          ldrs(r_1->as_FloatRegister(), fromAddr);
        } else {
          ldrd(r_1->as_FloatRegister(), fromAddr);
        }
      }

    }
  } while (vt != 0);

  if (mark_done && reg_state[from->value()] != reg_written) {
    // This is okay because no one else will write to that slot
    reg_state[from->value()] = reg_writable;
  }
  return done;
}