8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "precompiled.hpp"
26 #include "asm/macroAssembler.hpp"
27 #include "asm/macroAssembler.inline.hpp"
28 #include "gc/shared/cardTable.hpp"
29 #include "gc/shared/cardTableModRefBS.hpp"
30 #include "interpreter/interpreter.hpp"
31 #include "nativeInst_aarch64.hpp"
32 #include "oops/instanceOop.hpp"
33 #include "oops/method.hpp"
34 #include "oops/objArrayKlass.hpp"
35 #include "oops/oop.inline.hpp"
36 #include "prims/methodHandles.hpp"
37 #include "runtime/frame.inline.hpp"
38 #include "runtime/handles.inline.hpp"
39 #include "runtime/sharedRuntime.hpp"
40 #include "runtime/stubCodeGenerator.hpp"
41 #include "runtime/stubRoutines.hpp"
42 #include "runtime/thread.inline.hpp"
43 #include "utilities/align.hpp"
44 #ifdef COMPILER2
45 #include "opto/runtime.hpp"
46 #endif
47
48 #ifdef BUILTIN_SIM
|
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "precompiled.hpp"
26 #include "asm/macroAssembler.hpp"
27 #include "asm/macroAssembler.inline.hpp"
28 #include "gc/shared/barrierSet.hpp"
29 #include "gc/shared/barrierSetCodeGen.hpp"
30 #include "interpreter/interpreter.hpp"
31 #include "nativeInst_aarch64.hpp"
32 #include "oops/instanceOop.hpp"
33 #include "oops/method.hpp"
34 #include "oops/objArrayKlass.hpp"
35 #include "oops/oop.inline.hpp"
36 #include "prims/methodHandles.hpp"
37 #include "runtime/frame.inline.hpp"
38 #include "runtime/handles.inline.hpp"
39 #include "runtime/sharedRuntime.hpp"
40 #include "runtime/stubCodeGenerator.hpp"
41 #include "runtime/stubRoutines.hpp"
42 #include "runtime/thread.inline.hpp"
43 #include "utilities/align.hpp"
44 #ifdef COMPILER2
45 #include "opto/runtime.hpp"
46 #endif
47
48 #ifdef BUILTIN_SIM
|
602 __ bind(error);
603 __ ldp(c_rarg3, c_rarg2, Address(__ post(sp, 16)));
604
605 __ push(RegSet::range(r0, r29), sp);
606 // debug(char* msg, int64_t pc, int64_t regs[])
607 __ mov(c_rarg0, rscratch1); // pass address of error message
608 __ mov(c_rarg1, lr); // pass return address
609 __ mov(c_rarg2, sp); // pass address of regs on stack
610 #ifndef PRODUCT
611 assert(frame::arg_reg_save_area_bytes == 0, "not expecting frame reg save area");
612 #endif
613 BLOCK_COMMENT("call MacroAssembler::debug");
614 __ mov(rscratch1, CAST_FROM_FN_PTR(address, MacroAssembler::debug64));
615 __ blrt(rscratch1, 3, 0, 1);
616
617 return start;
618 }
619
620 void array_overlap_test(Label& L_no_overlap, Address::sxtw sf) { __ b(L_no_overlap); }
621
622 // Generate code for an array write pre barrier
623 //
624 // addr - starting address
625 // count - element count
626 // tmp - scratch register
627 // saved_regs - registers to be saved before calling static_write_ref_array_pre
628 //
629 // Callers must specify which registers to preserve in saved_regs.
630 // Clobbers: r0-r18, v0-v7, v16-v31, except saved_regs.
631 //
632 void gen_write_ref_array_pre_barrier(Register addr, Register count, bool dest_uninitialized, RegSet saved_regs) {
633 BarrierSet* bs = Universe::heap()->barrier_set();
634 switch (bs->kind()) {
635 case BarrierSet::G1BarrierSet:
636 // With G1, don't generate the call if we statically know that the target in uninitialized
637 if (!dest_uninitialized) {
638 __ push(saved_regs, sp);
639 if (count == c_rarg0) {
640 if (addr == c_rarg1) {
641 // exactly backwards!!
642 __ mov(rscratch1, c_rarg0);
643 __ mov(c_rarg0, c_rarg1);
644 __ mov(c_rarg1, rscratch1);
645 } else {
646 __ mov(c_rarg1, count);
647 __ mov(c_rarg0, addr);
648 }
649 } else {
650 __ mov(c_rarg0, addr);
651 __ mov(c_rarg1, count);
652 }
653 __ call_VM_leaf(CAST_FROM_FN_PTR(address, BarrierSet::static_write_ref_array_pre), 2);
654 __ pop(saved_regs, sp);
655 break;
656 case BarrierSet::CardTableModRef:
657 break;
658 default:
659 ShouldNotReachHere();
660
661 }
662 }
663 }
664
665 //
666 // Generate code for an array write post barrier
667 //
668 // Input:
669 // start - register containing starting address of destination array
670 // end - register containing ending address of destination array
671 // scratch - scratch register
672 // saved_regs - registers to be saved before calling static_write_ref_array_post
673 //
674 // The input registers are overwritten.
675 // The ending address is inclusive.
676 // Callers must specify which registers to preserve in saved_regs.
677 // Clobbers: r0-r18, v0-v7, v16-v31, except saved_regs.
678 void gen_write_ref_array_post_barrier(Register start, Register end, Register scratch, RegSet saved_regs) {
679 assert_different_registers(start, end, scratch);
680 BarrierSet* bs = Universe::heap()->barrier_set();
681 switch (bs->kind()) {
682 case BarrierSet::G1BarrierSet:
683
684 {
685 __ push(saved_regs, sp);
686 // must compute element count unless barrier set interface is changed (other platforms supply count)
687 assert_different_registers(start, end, scratch);
688 __ lea(scratch, Address(end, BytesPerHeapOop));
689 __ sub(scratch, scratch, start); // subtract start to get #bytes
690 __ lsr(scratch, scratch, LogBytesPerHeapOop); // convert to element count
691 __ mov(c_rarg0, start);
692 __ mov(c_rarg1, scratch);
693 __ call_VM_leaf(CAST_FROM_FN_PTR(address, BarrierSet::static_write_ref_array_post), 2);
694 __ pop(saved_regs, sp);
695 }
696 break;
697 case BarrierSet::CardTableModRef:
698 {
699 CardTableModRefBS* ctbs = barrier_set_cast<CardTableModRefBS>(bs);
700 CardTable* ct = ctbs->card_table();
701 assert(sizeof(*ct->byte_map_base()) == sizeof(jbyte), "adjust this code");
702
703 Label L_loop;
704
705 __ lsr(start, start, CardTable::card_shift);
706 __ lsr(end, end, CardTable::card_shift);
707 __ sub(end, end, start); // number of bytes to copy
708
709 const Register count = end; // 'end' register contains bytes count now
710 __ load_byte_map_base(scratch);
711 __ add(start, start, scratch);
712 if (UseConcMarkSweepGC) {
713 __ membar(__ StoreStore);
714 }
715 __ BIND(L_loop);
716 __ strb(zr, Address(start, count));
717 __ subs(count, count, 1);
718 __ br(Assembler::GE, L_loop);
719 }
720 break;
721 default:
722 ShouldNotReachHere();
723
724 }
725 }
726
727 // The inner part of zero_words(). This is the bulk operation,
728 // zeroing words in blocks, possibly using DC ZVA to do it. The
729 // caller is responsible for zeroing the last few words.
730 //
731 // Inputs:
732 // r10: the HeapWord-aligned base address of an array to zero.
733 // r11: the count in HeapWords, r11 > 0.
734 //
735 // Returns r10 and r11, adjusted for the caller to clear.
736 // r10: the base address of the tail of words left to clear.
737 // r11: the number of words in the tail.
738 // r11 < MacroAssembler::zero_words_block_size.
739
740 address generate_zero_blocks() {
741 Label store_pair, loop_store_pair, done;
742 Label base_aligned;
743
744 Register base = r10, cnt = r11;
745
|
602 __ bind(error);
603 __ ldp(c_rarg3, c_rarg2, Address(__ post(sp, 16)));
604
605 __ push(RegSet::range(r0, r29), sp);
606 // debug(char* msg, int64_t pc, int64_t regs[])
607 __ mov(c_rarg0, rscratch1); // pass address of error message
608 __ mov(c_rarg1, lr); // pass return address
609 __ mov(c_rarg2, sp); // pass address of regs on stack
610 #ifndef PRODUCT
611 assert(frame::arg_reg_save_area_bytes == 0, "not expecting frame reg save area");
612 #endif
613 BLOCK_COMMENT("call MacroAssembler::debug");
614 __ mov(rscratch1, CAST_FROM_FN_PTR(address, MacroAssembler::debug64));
615 __ blrt(rscratch1, 3, 0, 1);
616
617 return start;
618 }
619
620 void array_overlap_test(Label& L_no_overlap, Address::sxtw sf) { __ b(L_no_overlap); }
621
622 // The inner part of zero_words(). This is the bulk operation,
623 // zeroing words in blocks, possibly using DC ZVA to do it. The
624 // caller is responsible for zeroing the last few words.
625 //
626 // Inputs:
627 // r10: the HeapWord-aligned base address of an array to zero.
628 // r11: the count in HeapWords, r11 > 0.
629 //
630 // Returns r10 and r11, adjusted for the caller to clear.
631 // r10: the base address of the tail of words left to clear.
632 // r11: the number of words in the tail.
633 // r11 < MacroAssembler::zero_words_block_size.
634
635 address generate_zero_blocks() {
636 Label store_pair, loop_store_pair, done;
637 Label base_aligned;
638
639 Register base = r10, cnt = r11;
640
|
1438 //
1439 // Side Effects:
1440 // disjoint_int_copy_entry is set to the no-overlap entry point
1441 // used by generate_conjoint_int_oop_copy().
1442 //
// Generates a stub that copies 'count' elements of 'size' bytes each from
// s (c_rarg0) to d (c_rarg1), for regions known not to overlap. When
// is_oop is true, GC pre/post write barriers bracket the copy. Returns
// the stub's start address; *entry (if non-NULL) receives the entry
// point past the frame setup, used as the no-overlap target by the
// conjoint copy stub. The stub returns 0 in r0.
address generate_disjoint_copy(size_t size, bool aligned, bool is_oop, address *entry,
                               const char *name, bool dest_uninitialized = false) {
  Register s = c_rarg0, d = c_rarg1, count = c_rarg2;
  RegSet saved_reg = RegSet::of(s, d, count);
  __ align(CodeEntryAlignment);
  StubCodeMark mark(this, "StubRoutines", name);
  address start = __ pc();
  __ enter();

  if (entry != NULL) {
    *entry = __ pc();
    // caller can pass a 64-bit byte count here (from Unsafe.copyMemory)
    BLOCK_COMMENT("Entry:");
  }

  if (is_oop) {
    gen_write_ref_array_pre_barrier(d, count, dest_uninitialized, saved_reg);
    // save regs before copy_memory
    __ push(RegSet::of(d, count), sp);
  }
  copy_memory(aligned, s, d, count, rscratch1, size);
  if (is_oop) {
    __ pop(RegSet::of(d, count), sp);
    if (VerifyOops)
      verify_oop_array(size, d, count, r16);
    __ sub(count, count, 1); // make an inclusive end pointer
    __ lea(count, Address(d, count, Address::lsl(exact_log2(size))));
    gen_write_ref_array_post_barrier(d, count, rscratch1, RegSet());
  }
  __ leave();
  __ mov(r0, zr); // return 0
  __ ret(lr);
#ifdef BUILTIN_SIM
  {
    // Register the freshly generated stub with the AArch64 simulator.
    AArch64Simulator *sim = AArch64Simulator::get_current(UseSimulatorCache, DisableBCCheck);
    sim->notifyCompile(const_cast<char*>(name), start);
  }
#endif
  return start;
}
1483
1484 // Arguments:
1485 // aligned - true => Input and output aligned on a HeapWord == 8-byte boundary
1486 // ignored
1487 // is_oop - true => oop array, so generate store check code
1488 // name - stub name string
1489 //
1490 // Inputs:
|
1333 //
1334 // Side Effects:
1335 // disjoint_int_copy_entry is set to the no-overlap entry point
1336 // used by generate_conjoint_int_oop_copy().
1337 //
// Generates a stub that copies 'count' elements of 'size' bytes each from
// s (c_rarg0) to d (c_rarg1), for regions known not to overlap. GC
// barrier emission is delegated to the active BarrierSetCodeGen via
// arraycopy_prologue/arraycopy_epilogue, parameterized by decorators.
// Returns the stub's start address; *entry (if non-NULL) receives the
// entry point past the frame setup, used as the no-overlap target by
// the conjoint copy stub. The stub returns 0 in r0.
address generate_disjoint_copy(size_t size, bool aligned, bool is_oop, address *entry,
                               const char *name, bool dest_uninitialized = false) {
  Register s = c_rarg0, d = c_rarg1, count = c_rarg2;
  RegSet saved_reg = RegSet::of(s, d, count);
  __ align(CodeEntryAlignment);
  StubCodeMark mark(this, "StubRoutines", name);
  address start = __ pc();
  __ enter();

  if (entry != NULL) {
    *entry = __ pc();
    // caller can pass a 64-bit byte count here (from Unsafe.copyMemory)
    BLOCK_COMMENT("Entry:");
  }

  // Describe this copy to the barrier generator via decorators.
  BarrierSetCodeGen *bs = Universe::heap()->barrier_set()->code_gen();
  DecoratorSet decorators = ARRAYCOPY_DISJOINT;
  if (dest_uninitialized) {
    decorators |= AS_DEST_NOT_INITIALIZED;
  }
  if (aligned) {
    decorators |= ARRAYCOPY_ALIGNED;
  }

  bs->arraycopy_prologue(_masm, decorators, is_oop, d, count, saved_reg);

  if (is_oop) {
    // save regs before copy_memory
    __ push(RegSet::of(d, count), sp);
  }
  copy_memory(aligned, s, d, count, rscratch1, size);

  if (is_oop) {
    __ pop(RegSet::of(d, count), sp);
    if (VerifyOops)
      verify_oop_array(size, d, count, r16);
    __ sub(count, count, 1); // make an inclusive end pointer
    __ lea(count, Address(d, count, Address::lsl(exact_log2(size))));
  }

  bs->arraycopy_epilogue(_masm, decorators, is_oop, d, count, rscratch1, RegSet());

  __ leave();
  __ mov(r0, zr); // return 0
  __ ret(lr);
#ifdef BUILTIN_SIM
  {
    // Register the freshly generated stub with the AArch64 simulator.
    AArch64Simulator *sim = AArch64Simulator::get_current(UseSimulatorCache, DisableBCCheck);
    sim->notifyCompile(const_cast<char*>(name), start);
  }
#endif
  return start;
}
1391
1392 // Arguments:
1393 // aligned - true => Input and output aligned on a HeapWord == 8-byte boundary
1394 // ignored
1395 // is_oop - true => oop array, so generate store check code
1396 // name - stub name string
1397 //
1398 // Inputs:
|
// Generates a stub that copies 'count' elements of 'size' bytes each from
// s (c_rarg0) to d (c_rarg1) where the regions may overlap. If the
// destination is at least count*size bytes past the source, a forward
// copy is safe and control branches to nooverlap_target (the disjoint
// stub); otherwise copy_memory is invoked with a negative direction.
// When is_oop is true, GC pre/post write barriers bracket the copy.
// The stub returns 0 in r0.
address generate_conjoint_copy(size_t size, bool aligned, bool is_oop, address nooverlap_target,
                               address *entry, const char *name,
                               bool dest_uninitialized = false) {
  Register s = c_rarg0, d = c_rarg1, count = c_rarg2;
  RegSet saved_regs = RegSet::of(s, d, count);
  StubCodeMark mark(this, "StubRoutines", name);
  address start = __ pc();
  __ enter();

  if (entry != NULL) {
    *entry = __ pc();
    // caller can pass a 64-bit byte count here (from Unsafe.copyMemory)
    BLOCK_COMMENT("Entry:");
  }

  // use fwd copy when (d-s) above_equal (count*size)
  __ sub(rscratch1, d, s);
  __ cmp(rscratch1, count, Assembler::LSL, exact_log2(size));
  __ br(Assembler::HS, nooverlap_target);

  if (is_oop) {
    gen_write_ref_array_pre_barrier(d, count, dest_uninitialized, saved_regs);
    // save regs before copy_memory
    __ push(RegSet::of(d, count), sp);
  }
  copy_memory(aligned, s, d, count, rscratch1, -size);
  if (is_oop) {
    __ pop(RegSet::of(d, count), sp);
    if (VerifyOops)
      verify_oop_array(size, d, count, r16);
    __ sub(count, count, 1); // make an inclusive end pointer
    __ lea(count, Address(d, count, Address::lsl(exact_log2(size))));
    gen_write_ref_array_post_barrier(d, count, rscratch1, RegSet());
  }
  __ leave();
  __ mov(r0, zr); // return 0
  __ ret(lr);
#ifdef BUILTIN_SIM
  {
    // Register the freshly generated stub with the AArch64 simulator.
    AArch64Simulator *sim = AArch64Simulator::get_current(UseSimulatorCache, DisableBCCheck);
    sim->notifyCompile(const_cast<char*>(name), start);
  }
#endif
  return start;
}
1544
1545 // Arguments:
1546 // aligned - true => Input and output aligned on a HeapWord == 8-byte boundary
1547 // ignored
1548 // name - stub name string
1549 //
1550 // Inputs:
1551 // c_rarg0 - source array address
|
// Generates a stub that copies 'count' elements of 'size' bytes each from
// s (c_rarg0) to d (c_rarg1) where the regions may overlap. If the
// destination is at least count*size bytes past the source, a forward
// copy is safe and control branches to nooverlap_target (the disjoint
// stub); otherwise copy_memory is invoked with a negative direction.
// GC barrier emission is delegated to the active BarrierSetCodeGen via
// arraycopy_prologue/arraycopy_epilogue. The stub returns 0 in r0.
address generate_conjoint_copy(size_t size, bool aligned, bool is_oop, address nooverlap_target,
                               address *entry, const char *name,
                               bool dest_uninitialized = false) {
  Register s = c_rarg0, d = c_rarg1, count = c_rarg2;
  RegSet saved_regs = RegSet::of(s, d, count);
  StubCodeMark mark(this, "StubRoutines", name);
  address start = __ pc();
  __ enter();

  if (entry != NULL) {
    *entry = __ pc();
    // caller can pass a 64-bit byte count here (from Unsafe.copyMemory)
    BLOCK_COMMENT("Entry:");
  }

  // use fwd copy when (d-s) above_equal (count*size)
  __ sub(rscratch1, d, s);
  __ cmp(rscratch1, count, Assembler::LSL, exact_log2(size));
  __ br(Assembler::HS, nooverlap_target);

  // Describe this copy to the barrier generator via decorators.
  // (No ARRAYCOPY_DISJOINT here — the regions may overlap.)
  BarrierSetCodeGen *bs = Universe::heap()->barrier_set()->code_gen();
  DecoratorSet decorators = 0;
  if (dest_uninitialized) {
    decorators |= AS_DEST_NOT_INITIALIZED;
  }
  if (aligned) {
    decorators |= ARRAYCOPY_ALIGNED;
  }
  bs->arraycopy_prologue(_masm, decorators, is_oop, d, count, saved_regs);

  if (is_oop) {
    // save regs before copy_memory
    __ push(RegSet::of(d, count), sp);
  }
  copy_memory(aligned, s, d, count, rscratch1, -size);
  if (is_oop) {
    __ pop(RegSet::of(d, count), sp);
    if (VerifyOops)
      verify_oop_array(size, d, count, r16);
    __ sub(count, count, 1); // make an inclusive end pointer
    __ lea(count, Address(d, count, Address::lsl(exact_log2(size))));
  }
  bs->arraycopy_epilogue(_masm, decorators, is_oop, d, count, rscratch1, RegSet());
  __ leave();
  __ mov(r0, zr); // return 0
  __ ret(lr);
#ifdef BUILTIN_SIM
  {
    // Register the freshly generated stub with the AArch64 simulator.
    AArch64Simulator *sim = AArch64Simulator::get_current(UseSimulatorCache, DisableBCCheck);
    sim->notifyCompile(const_cast<char*>(name), start);
  }
#endif
  return start;
}
1461
1462 // Arguments:
1463 // aligned - true => Input and output aligned on a HeapWord == 8-byte boundary
1464 // ignored
1465 // name - stub name string
1466 //
1467 // Inputs:
1468 // c_rarg0 - source array address
|
1853
1854 // Empty array: Nothing to do.
1855 __ cbz(count, L_done);
1856
1857 __ push(RegSet::of(r18, r19, r20, r21), sp);
1858
1859 #ifdef ASSERT
1860 BLOCK_COMMENT("assert consistent ckoff/ckval");
1861 // The ckoff and ckval must be mutually consistent,
1862 // even though caller generates both.
1863 { Label L;
1864 int sco_offset = in_bytes(Klass::super_check_offset_offset());
1865 __ ldrw(start_to, Address(ckval, sco_offset));
1866 __ cmpw(ckoff, start_to);
1867 __ br(Assembler::EQ, L);
1868 __ stop("super_check_offset inconsistent");
1869 __ bind(L);
1870 }
1871 #endif //ASSERT
1872
1873 gen_write_ref_array_pre_barrier(to, count, dest_uninitialized, wb_pre_saved_regs);
1874
1875 // save the original count
1876 __ mov(count_save, count);
1877
1878 // Copy from low to high addresses
1879 __ mov(start_to, to); // Save destination array start address
1880 __ b(L_load_element);
1881
1882 // ======== begin loop ========
1883 // (Loop is rotated; its entry is L_load_element.)
1884 // Loop control:
1885 // for (; count != 0; count--) {
1886 // copied_oop = load_heap_oop(from++);
1887 // ... generate_type_check ...;
1888 // store_heap_oop(to++, copied_oop);
1889 // }
1890 __ align(OptoLoopAlignment);
1891
1892 __ BIND(L_store_element);
|
1770
1771 // Empty array: Nothing to do.
1772 __ cbz(count, L_done);
1773
1774 __ push(RegSet::of(r18, r19, r20, r21), sp);
1775
1776 #ifdef ASSERT
1777 BLOCK_COMMENT("assert consistent ckoff/ckval");
1778 // The ckoff and ckval must be mutually consistent,
1779 // even though caller generates both.
1780 { Label L;
1781 int sco_offset = in_bytes(Klass::super_check_offset_offset());
1782 __ ldrw(start_to, Address(ckval, sco_offset));
1783 __ cmpw(ckoff, start_to);
1784 __ br(Assembler::EQ, L);
1785 __ stop("super_check_offset inconsistent");
1786 __ bind(L);
1787 }
1788 #endif //ASSERT
1789
1790 BarrierSetCodeGen *bs = Universe::heap()->barrier_set()->code_gen();
1791 DecoratorSet decorators = ARRAYCOPY_CHECKCAST;
1792 bool is_oop = true;
1793 if (dest_uninitialized) {
1794 decorators |= AS_DEST_NOT_INITIALIZED;
1795 }
1796
1797 bs->arraycopy_prologue(_masm, decorators, is_oop, to, count, wb_pre_saved_regs);
1798
1799 // save the original count
1800 __ mov(count_save, count);
1801
1802 // Copy from low to high addresses
1803 __ mov(start_to, to); // Save destination array start address
1804 __ b(L_load_element);
1805
1806 // ======== begin loop ========
1807 // (Loop is rotated; its entry is L_load_element.)
1808 // Loop control:
1809 // for (; count != 0; count--) {
1810 // copied_oop = load_heap_oop(from++);
1811 // ... generate_type_check ...;
1812 // store_heap_oop(to++, copied_oop);
1813 // }
1814 __ align(OptoLoopAlignment);
1815
1816 __ BIND(L_store_element);
|
1897 // ======== loop entry is here ========
1898 __ BIND(L_load_element);
1899 __ load_heap_oop(copied_oop, __ post(from, UseCompressedOops ? 4 : 8)); // load the oop
1900 __ cbz(copied_oop, L_store_element);
1901
1902 __ load_klass(r19_klass, copied_oop);// query the object klass
1903 generate_type_check(r19_klass, ckoff, ckval, L_store_element);
1904 // ======== end loop ========
1905
1906 // It was a real error; we must depend on the caller to finish the job.
1907 // Register count = remaining oops, count_orig = total oops.
1908 // Emit GC store barriers for the oops we have copied and report
1909 // their number to the caller.
1910
1911 __ subs(count, count_save, count); // K = partially copied oop count
1912 __ eon(count, count, zr); // report (-1^K) to caller
1913 __ br(Assembler::EQ, L_done_pop);
1914
1915 __ BIND(L_do_card_marks);
1916 __ add(to, to, -heapOopSize); // make an inclusive end pointer
1917 gen_write_ref_array_post_barrier(start_to, to, rscratch1, wb_post_saved_regs);
1918
1919 __ bind(L_done_pop);
1920 __ pop(RegSet::of(r18, r19, r20, r21), sp);
1921 inc_counter_np(SharedRuntime::_checkcast_array_copy_ctr);
1922
1923 __ bind(L_done);
1924 __ mov(r0, count);
1925 __ leave();
1926 __ ret(lr);
1927
1928 return start;
1929 }
1930
1931 // Perform range checks on the proposed arraycopy.
1932 // Kills temp, but nothing else.
1933 // Also, clean the sign bits of src_pos and dst_pos.
1934 void arraycopy_range_checks(Register src, // source array oop (c_rarg0)
1935 Register src_pos, // source position (c_rarg1)
1936 Register dst, // destination array oo (c_rarg2)
|
1821 // ======== loop entry is here ========
1822 __ BIND(L_load_element);
1823 __ load_heap_oop(copied_oop, __ post(from, UseCompressedOops ? 4 : 8)); // load the oop
1824 __ cbz(copied_oop, L_store_element);
1825
1826 __ load_klass(r19_klass, copied_oop);// query the object klass
1827 generate_type_check(r19_klass, ckoff, ckval, L_store_element);
1828 // ======== end loop ========
1829
1830 // It was a real error; we must depend on the caller to finish the job.
1831 // Register count = remaining oops, count_orig = total oops.
1832 // Emit GC store barriers for the oops we have copied and report
1833 // their number to the caller.
1834
1835 __ subs(count, count_save, count); // K = partially copied oop count
1836 __ eon(count, count, zr); // report (-1^K) to caller
1837 __ br(Assembler::EQ, L_done_pop);
1838
1839 __ BIND(L_do_card_marks);
1840 __ add(to, to, -heapOopSize); // make an inclusive end pointer
1841 bs->arraycopy_epilogue(_masm, decorators, is_oop, start_to, to, rscratch1, wb_post_saved_regs);
1842
1843 __ bind(L_done_pop);
1844 __ pop(RegSet::of(r18, r19, r20, r21), sp);
1845 inc_counter_np(SharedRuntime::_checkcast_array_copy_ctr);
1846
1847 __ bind(L_done);
1848 __ mov(r0, count);
1849 __ leave();
1850 __ ret(lr);
1851
1852 return start;
1853 }
1854
1855 // Perform range checks on the proposed arraycopy.
1856 // Kills temp, but nothing else.
1857 // Also, clean the sign bits of src_pos and dst_pos.
1858 void arraycopy_range_checks(Register src, // source array oop (c_rarg0)
1859 Register src_pos, // source position (c_rarg1)
1860 Register dst, // destination array oo (c_rarg2)
|