8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "precompiled.hpp"
26 #include "asm/macroAssembler.inline.hpp"
27 #include "registerSaver_s390.hpp"
28 #include "gc/shared/cardTable.hpp"
29 #include "gc/shared/cardTableBarrierSet.hpp"
30 #include "interpreter/interpreter.hpp"
31 #include "interpreter/interp_masm.hpp"
32 #include "nativeInst_s390.hpp"
33 #include "oops/instanceOop.hpp"
34 #include "oops/objArrayKlass.hpp"
35 #include "oops/oop.inline.hpp"
36 #include "prims/methodHandles.hpp"
37 #include "runtime/frame.inline.hpp"
38 #include "runtime/handles.inline.hpp"
39 #include "runtime/sharedRuntime.hpp"
40 #include "runtime/stubCodeGenerator.hpp"
41 #include "runtime/stubRoutines.hpp"
42 #include "runtime/thread.inline.hpp"
43
44 // Declaration and definition of StubGenerator (no .hpp file).
45 // For a more detailed description of the stub routine structure
46 // see the comment in stubRoutines.hpp.
47
48 #ifdef PRODUCT
|
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "precompiled.hpp"
26 #include "asm/macroAssembler.inline.hpp"
27 #include "registerSaver_s390.hpp"
28 #include "gc/shared/barrierSet.hpp"
29 #include "gc/shared/barrierSetAssembler.hpp"
30 #include "interpreter/interpreter.hpp"
31 #include "interpreter/interp_masm.hpp"
32 #include "nativeInst_s390.hpp"
33 #include "oops/instanceOop.hpp"
34 #include "oops/objArrayKlass.hpp"
35 #include "oops/oop.inline.hpp"
36 #include "prims/methodHandles.hpp"
37 #include "runtime/frame.inline.hpp"
38 #include "runtime/handles.inline.hpp"
39 #include "runtime/sharedRuntime.hpp"
40 #include "runtime/stubCodeGenerator.hpp"
41 #include "runtime/stubRoutines.hpp"
42 #include "runtime/thread.inline.hpp"
43
44 // Declaration and definition of StubGenerator (no .hpp file).
45 // For a more detailed description of the stub routine structure
46 // see the comment in stubRoutines.hpp.
47
48 #ifdef PRODUCT
|
668 __ z_br(Z_R14);
669
670 return start;
671 }
672
673 // Return address of code to be called from code generated by
674 // MacroAssembler::verify_oop.
675 //
676 // Don't generate, rather use C++ code.
677 address generate_verify_oop_subroutine() {
678 // Don't generate a StubCodeMark, because no code is generated!
679 // Generating the mark triggers notifying the oprofile jvmti agent
680 // about the dynamic code generation, but the stub without
681 // code (code_size == 0) confuses opjitconv
682 // StubCodeMark mark(this, "StubRoutines", "verify_oop_stub");
683
684 address start = 0;
685 return start;
686 }
687
688 // Generate pre-write barrier for array.
689 //
690 // Input:
691 // addr - register containing starting address
692 // count - register containing element count
693 //
694 // The input registers are overwritten.
//
// NOTE(review): only the G1 case emits code (SATB pre-barrier over the
// destination range); CardTableBarrierSet/ModRef need no pre-barrier.
// Generated code may clobber Z_R0_scratch and the volatile argument
// registers saved/restored around the runtime call below.
695 void gen_write_ref_array_pre_barrier(Register addr, Register count, bool dest_uninitialized) {
696
697 BarrierSet* const bs = Universe::heap()->barrier_set();
698 switch (bs->kind()) {
699 case BarrierSet::G1BarrierSet:
700 // With G1, don't generate the call if we statically know that the target is uninitialized.
701 if (!dest_uninitialized) {
702 // Is marking active?
703 Label filtered;
704 assert_different_registers(addr, Z_R0_scratch); // would be destroyed by push_frame()
705 assert_different_registers(count, Z_R0_scratch); // would be destroyed by push_frame()
706 Register Rtmp1 = Z_R0_scratch;
// Offset of the SATB queue 'active' flag within JavaThread.
707 const int active_offset = in_bytes(JavaThread::satb_mark_queue_offset() +
708 SATBMarkQueue::byte_offset_of_active());
// The flag's width differs between builds (4 or 1 bytes); pick the
// matching load-and-test instruction.
709 if (in_bytes(SATBMarkQueue::byte_width_of_active()) == 4) {
710 __ load_and_test_int(Rtmp1, Address(Z_thread, active_offset));
711 } else {
712 guarantee(in_bytes(SATBMarkQueue::byte_width_of_active()) == 1, "Assumption");
713 __ load_and_test_byte(Rtmp1, Address(Z_thread, active_offset));
714 }
715 __ z_bre(filtered); // Activity indicator is zero, so there is no marking going on currently.
716
// Marking is active: call the runtime pre-barrier with (addr, count),
// preserving the argument registers around the call.
717 // __ push_frame_abi160(0); // implicitly done in save_live_registers()
718 (void) RegisterSaver::save_live_registers(_masm, RegisterSaver::arg_registers);
719 __ call_VM_leaf(CAST_FROM_FN_PTR(address, BarrierSet::static_write_ref_array_pre), addr, count);
720 (void) RegisterSaver::restore_live_registers(_masm, RegisterSaver::arg_registers);
721 // __ pop_frame(); // implicitly done in restore_live_registers()
722
723 __ bind(filtered);
724 }
725 break;
726 case BarrierSet::CardTableBarrierSet:
727 case BarrierSet::ModRef:
// No pre-barrier required for card-table based barrier sets.
728 break;
729 default:
730 ShouldNotReachHere();
731 }
732 }
733
734 // Generate post-write barrier for array.
735 //
736 // Input:
737 // addr - register containing starting address
738 // count - register containing element count
739 //
740 // The input registers are overwritten.
//
// If branchToEnd is false, generated code does not fall through: every
// path ends with a branch via Z_R14 (return to the stub caller) or a
// tail call, so callers must not emit fall-through code after it.
741 void gen_write_ref_array_post_barrier(Register addr, Register count, bool branchToEnd) {
742 BarrierSet* const bs = Universe::heap()->barrier_set();
743 switch (bs->kind()) {
744 case BarrierSet::G1BarrierSet:
745 {
// G1: delegate to the runtime post-barrier, either as a normal call
// (argument registers preserved) or as a tail call that returns
// directly to the stub caller.
746 if (branchToEnd) {
747 assert_different_registers(addr, Z_R0_scratch); // would be destroyed by push_frame()
748 assert_different_registers(count, Z_R0_scratch); // would be destroyed by push_frame()
749 // __ push_frame_abi160(0); // implicitly done in save_live_registers()
750 (void) RegisterSaver::save_live_registers(_masm, RegisterSaver::arg_registers);
751 __ call_VM_leaf(CAST_FROM_FN_PTR(address, BarrierSet::static_write_ref_array_post), addr, count);
752 (void) RegisterSaver::restore_live_registers(_masm, RegisterSaver::arg_registers);
753 // __ pop_frame(); // implicitly done in restore_live_registers()
754 } else {
755 // Tail call: call c and return to stub caller.
756 address entry_point = CAST_FROM_FN_PTR(address, BarrierSet::static_write_ref_array_post);
757 __ lgr_if_needed(Z_ARG1, addr);
758 __ lgr_if_needed(Z_ARG2, count);
759 __ load_const(Z_R1, entry_point);
760 __ z_br(Z_R1); // Branch without linking, callee will return to stub caller.
761 }
762 }
763 break;
764 case BarrierSet::CardTableBarrierSet:
765 // These cases formerly known as
766 // void array_store_check(Register addr, Register count, bool branchToEnd).
767 {
// Store zero into every card-table byte covering
// [addr, addr + count*BytesPerHeapOop). Strategy: one card -> single
// STC; up to 256 bytes -> XC executed with variable length; larger
// ranges -> MVCLE with a zero-length source (pad byte 0).
768 NearLabel doXC, done;
769 CardTableBarrierSet* ctbs = barrier_set_cast<CardTableBarrierSet>(bs);
770 CardTable* ct = ctbs->card_table();
771 assert(sizeof(*ct->byte_map_base()) == sizeof(jbyte), "adjust this code");
772 assert_different_registers(Z_R0, Z_R1, addr, count);
773
774 // Nothing to do if count <= 0.
775 if (branchToEnd) {
776 __ compare64_and_branch(count, (intptr_t) 0, Assembler::bcondNotHigh, done);
777 } else {
778 __ z_ltgr(count, count);
779 __ z_bcr(Assembler::bcondNotPositive, Z_R14);
780 }
781
782 // Note: We can't combine the shifts. We could lose a carry
783 // from calculating the array end address.
784 // count = (count-1)*BytesPerHeapOop + addr
785 // Count holds addr of last oop in array then.
786 __ z_sllg(count, count, LogBytesPerHeapOop);
787 __ add2reg_with_index(count, -BytesPerHeapOop, count, addr);
788
789 // Get base address of card table.
790 __ load_const_optimized(Z_R1, (address)ct->byte_map_base());
791
792 // count = (count>>shift) - (addr>>shift)
// After the shifts, addr holds the first card index and count the last.
793 __ z_srlg(addr, addr, CardTable::card_shift);
794 __ z_srlg(count, count, CardTable::card_shift);
795
796 // Prefetch first elements of card table for update.
797 if (VM_Version::has_Prefetch()) {
798 __ z_pfd(0x02, 0, addr, Z_R1);
799 }
800
801 // Special case: clear just one byte.
802 __ clear_reg(Z_R0, true, false); // Used for doOneByte.
803 __ z_sgr(count, addr); // Count = n-1 now, CC used for brc below.
804 __ z_stc(Z_R0, 0, addr, Z_R1); // Must preserve CC from z_sgr.
805 if (branchToEnd) {
806 __ z_brz(done);
807 } else {
808 __ z_bcr(Assembler::bcondZero, Z_R14);
809 }
810
// More than one card: count (= n-1) <= 255 fits a single XC, else MVCLE.
811 __ z_cghi(count, 255);
812 __ z_brnh(doXC);
813
814 // MVCLE: clear a long area.
815 // Start addr of card table range = base + addr.
816 // # bytes in card table range = (count + 1)
817 __ add2reg_with_index(Z_R0, 0, Z_R1, addr);
818 __ add2reg(Z_R1, 1, count);
819
820 // dirty hack:
821 // There are just two callers. Both pass
822 // count in Z_ARG3 = Z_R4
823 // addr in Z_ARG2 = Z_R3
824 // ==> use Z_ARG2 as src len reg = 0
825 // Z_ARG1 as src addr (ignored)
826 assert(count == Z_ARG3, "count: unexpected register number");
827 assert(addr == Z_ARG2, "addr: unexpected register number");
828 __ clear_reg(Z_ARG2, true, false);
829
830 __ MacroAssembler::move_long_ext(Z_R0, Z_ARG1, 0);
831
832 if (branchToEnd) {
833 __ z_bru(done);
834 } else {
835 __ z_bcr(Assembler::bcondAlways, Z_R14);
836 }
837
838 // XC: clear a short area.
839 Label XC_template; // Instr template, never exec directly!
840 __ bind(XC_template);
841 __ z_xc(0, 0, addr, 0, addr);
842
843 __ bind(doXC);
844 // start addr of card table range = base + addr
845 // end addr of card table range = base + addr + count
846 __ add2reg_with_index(addr, 0, Z_R1, addr);
847
// EXECUTE the XC template with the run-time length taken from 'count'
// (EX/EXRL supply the template's length field at execution time).
848 if (VM_Version::has_ExecuteExtensions()) {
849 __ z_exrl(count, XC_template); // Execute XC with var. len.
850 } else {
851 __ z_larl(Z_R1, XC_template);
852 __ z_ex(count, 0, Z_R0, Z_R1); // Execute XC with var. len.
853 }
854 if (!branchToEnd) {
855 __ z_br(Z_R14);
856 }
857
858 __ bind(done);
859 }
860 break;
861 case BarrierSet::ModRef:
862 if (!branchToEnd) { __ z_br(Z_R14); }
863 break;
864 default:
865 ShouldNotReachHere();
866 }
867 }
868
869
870 // This is to test that the count register contains a positive int value.
871 // Required because C2 does not respect int to long conversion for stub calls.
//
// Debug-only: emits code in ASSERT builds only; a no-op in product builds.
// Clobbers Z_R0 (receives the shifted-out sign/extension bits).
872 void assert_positive_int(Register count) {
873 #ifdef ASSERT
874 __ z_srag(Z_R0, count, 31); // Just leave the sign (must be zero) in Z_R0.
875 __ asm_assert_eq("missing zero extend", 0xAFFE); // 0xAFFE: presumably an assert marker id — see asm_assert_eq.
876 #endif
877 }
878
879 // Generate overlap test for array copy stubs.
880 // If no actual overlap is detected, control is transferred to the
881 // "normal" copy stub (entry address passed in disjoint_copy_target).
882 // Otherwise, execution continues with the code generated by the
883 // caller of array_overlap_test.
884 //
885 // Input:
886 // Z_ARG1 - from
887 // Z_ARG2 - to
888 // Z_ARG3 - element count
|
668 __ z_br(Z_R14);
669
670 return start;
671 }
672
673 // Return address of code to be called from code generated by
674 // MacroAssembler::verify_oop.
675 //
676 // Don't generate, rather use C++ code.
677 address generate_verify_oop_subroutine() {
678 // Don't generate a StubCodeMark, because no code is generated!
679 // Generating the mark triggers notifying the oprofile jvmti agent
680 // about the dynamic code generation, but the stub without
681 // code (code_size == 0) confuses opjitconv
682 // StubCodeMark mark(this, "StubRoutines", "verify_oop_stub");
683
684 address start = 0;
685 return start;
686 }
687
688 // This is to test that the count register contains a positive int value.
689 // Required because C2 does not respect int to long conversion for stub calls.
//
// Debug-only: emits code in ASSERT builds only; a no-op in product builds.
// Clobbers Z_R0 (receives the shifted-out sign/extension bits).
690 void assert_positive_int(Register count) {
691 #ifdef ASSERT
692 __ z_srag(Z_R0, count, 31); // Just leave the sign (must be zero) in Z_R0.
693 __ asm_assert_eq("missing zero extend", 0xAFFE); // 0xAFFE: presumably an assert marker id — see asm_assert_eq.
694 #endif
695 }
696
697 // Generate overlap test for array copy stubs.
698 // If no actual overlap is detected, control is transferred to the
699 // "normal" copy stub (entry address passed in disjoint_copy_target).
700 // Otherwise, execution continues with the code generated by the
701 // caller of array_overlap_test.
702 //
703 // Input:
704 // Z_ARG1 - from
705 // Z_ARG2 - to
706 // Z_ARG3 - element count
|
1464 }
1465
1466
1467 address generate_disjoint_long_copy(bool aligned, const char * name) {
1468 StubCodeMark mark(this, "StubRoutines", name);
1469 // This is the zarch specific stub generator for long array copy.
1470 // Refer to generate_disjoint_copy for a list of prereqs and features:
1471 unsigned int start_off = __ offset(); // Remember stub start address (is rtn value).
1472 generate_disjoint_copy(aligned, 8, false, false);
1473 return __ addr_at(start_off);
1474 }
1475
1476
1477 address generate_disjoint_oop_copy(bool aligned, const char * name, bool dest_uninitialized) {
1478 StubCodeMark mark(this, "StubRoutines", name);
1479 // This is the zarch specific stub generator for oop array copy.
1480 // Refer to generate_disjoint_copy for a list of prereqs and features.
1481 unsigned int start_off = __ offset(); // Remember stub start address (is rtn value).
1482 unsigned int size = UseCompressedOops ? 4 : 8;
1483
1484 gen_write_ref_array_pre_barrier(Z_ARG2, Z_ARG3, dest_uninitialized);
1485
1486 generate_disjoint_copy(aligned, size, true, true);
1487
1488 gen_write_ref_array_post_barrier(Z_ARG2, Z_ARG3, false);
1489
1490 return __ addr_at(start_off);
1491 }
1492
1493
1494 address generate_conjoint_byte_copy(bool aligned, const char * name) {
1495 StubCodeMark mark(this, "StubRoutines", name);
1496 // This is the zarch specific stub generator for overlapping byte array copy.
1497 // Refer to generate_conjoint_copy for a list of prereqs and features:
1498 unsigned int start_off = __ offset(); // Remember stub start address (is rtn value).
1499 address nooverlap_target = aligned ? StubRoutines::arrayof_jbyte_disjoint_arraycopy()
1500 : StubRoutines::jbyte_disjoint_arraycopy();
1501
1502 array_overlap_test(nooverlap_target, 0); // Branch away to nooverlap_target if disjoint.
1503 generate_conjoint_copy(aligned, 1, false);
1504
1505 return __ addr_at(start_off);
1506 }
1507
|
1282 }
1283
1284
1285 address generate_disjoint_long_copy(bool aligned, const char * name) {
1286 StubCodeMark mark(this, "StubRoutines", name);
1287 // This is the zarch specific stub generator for long array copy.
1288 // Refer to generate_disjoint_copy for a list of prereqs and features:
1289 unsigned int start_off = __ offset(); // Remember stub start address (is rtn value).
1290 generate_disjoint_copy(aligned, 8, false, false);
1291 return __ addr_at(start_off);
1292 }
1293
1294
1295 address generate_disjoint_oop_copy(bool aligned, const char * name, bool dest_uninitialized) {
1296 StubCodeMark mark(this, "StubRoutines", name);
1297 // This is the zarch specific stub generator for oop array copy.
1298 // Refer to generate_disjoint_copy for a list of prereqs and features.
1299 unsigned int start_off = __ offset(); // Remember stub start address (is rtn value).
1300 unsigned int size = UseCompressedOops ? 4 : 8;
1301
1302 DecoratorSet decorators = ARRAYCOPY_DISJOINT;
1303 if (dest_uninitialized) {
1304 decorators |= AS_DEST_NOT_INITIALIZED;
1305 }
1306 if (aligned) {
1307 decorators |= ARRAYCOPY_ALIGNED;
1308 }
1309
1310 BarrierSetAssembler *bs = Universe::heap()->barrier_set()->barrier_set_assembler();
1311 bs->arraycopy_prologue(_masm, decorators, T_OBJECT, Z_ARG1, Z_ARG2, Z_ARG3);
1312
1313 generate_disjoint_copy(aligned, size, true, true);
1314
1315 bs->arraycopy_epilogue(_masm, decorators, T_OBJECT, Z_ARG2, Z_ARG3, true);
1316
1317 return __ addr_at(start_off);
1318 }
1319
1320
1321 address generate_conjoint_byte_copy(bool aligned, const char * name) {
1322 StubCodeMark mark(this, "StubRoutines", name);
1323 // This is the zarch specific stub generator for overlapping byte array copy.
1324 // Refer to generate_conjoint_copy for a list of prereqs and features:
1325 unsigned int start_off = __ offset(); // Remember stub start address (is rtn value).
1326 address nooverlap_target = aligned ? StubRoutines::arrayof_jbyte_disjoint_arraycopy()
1327 : StubRoutines::jbyte_disjoint_arraycopy();
1328
1329 array_overlap_test(nooverlap_target, 0); // Branch away to nooverlap_target if disjoint.
1330 generate_conjoint_copy(aligned, 1, false);
1331
1332 return __ addr_at(start_off);
1333 }
1334
|
1547 array_overlap_test(nooverlap_target, 3); // Branch away to nooverlap_target if disjoint.
1548 generate_conjoint_copy(aligned, 8, false);
1549
1550 return __ addr_at(start_off);
1551 }
1552
1553 address generate_conjoint_oop_copy(bool aligned, const char * name, bool dest_uninitialized) {
1554 StubCodeMark mark(this, "StubRoutines", name);
1555 // This is the zarch specific stub generator for overlapping oop array copy.
1556 // Refer to generate_conjoint_copy for a list of prereqs and features.
1557 unsigned int start_off = __ offset(); // Remember stub start address (is rtn value).
1558 unsigned int size = UseCompressedOops ? 4 : 8;
1559 unsigned int shift = UseCompressedOops ? 2 : 3;
1560
1561 address nooverlap_target = aligned ? StubRoutines::arrayof_oop_disjoint_arraycopy(dest_uninitialized)
1562 : StubRoutines::oop_disjoint_arraycopy(dest_uninitialized);
1563
1564 // Branch to disjoint_copy (if applicable) before pre_barrier to avoid double pre_barrier.
1565 array_overlap_test(nooverlap_target, shift); // Branch away to nooverlap_target if disjoint.
1566
1567 gen_write_ref_array_pre_barrier(Z_ARG2, Z_ARG3, dest_uninitialized);
1568
1569 generate_conjoint_copy(aligned, size, true); // Must preserve ARG2, ARG3.
1570
1571 gen_write_ref_array_post_barrier(Z_ARG2, Z_ARG3, false);
1572
1573 return __ addr_at(start_off);
1574 }
1575
1576
1577 void generate_arraycopy_stubs() {
1578
1579 // Note: the disjoint stubs must be generated first, some of
1580 // the conjoint stubs use them.
1581 StubRoutines::_jbyte_disjoint_arraycopy = generate_disjoint_byte_copy (false, "jbyte_disjoint_arraycopy");
1582 StubRoutines::_jshort_disjoint_arraycopy = generate_disjoint_short_copy(false, "jshort_disjoint_arraycopy");
1583 StubRoutines::_jint_disjoint_arraycopy = generate_disjoint_int_copy (false, "jint_disjoint_arraycopy");
1584 StubRoutines::_jlong_disjoint_arraycopy = generate_disjoint_long_copy (false, "jlong_disjoint_arraycopy");
1585 StubRoutines::_oop_disjoint_arraycopy = generate_disjoint_oop_copy (false, "oop_disjoint_arraycopy", false);
1586 StubRoutines::_oop_disjoint_arraycopy_uninit = generate_disjoint_oop_copy (false, "oop_disjoint_arraycopy_uninit", true);
1587
1588 StubRoutines::_arrayof_jbyte_disjoint_arraycopy = generate_disjoint_byte_copy (true, "arrayof_jbyte_disjoint_arraycopy");
1589 StubRoutines::_arrayof_jshort_disjoint_arraycopy = generate_disjoint_short_copy(true, "arrayof_jshort_disjoint_arraycopy");
1590 StubRoutines::_arrayof_jint_disjoint_arraycopy = generate_disjoint_int_copy (true, "arrayof_jint_disjoint_arraycopy");
|
1374 array_overlap_test(nooverlap_target, 3); // Branch away to nooverlap_target if disjoint.
1375 generate_conjoint_copy(aligned, 8, false);
1376
1377 return __ addr_at(start_off);
1378 }
1379
1380 address generate_conjoint_oop_copy(bool aligned, const char * name, bool dest_uninitialized) {
1381 StubCodeMark mark(this, "StubRoutines", name);
1382 // This is the zarch specific stub generator for overlapping oop array copy.
1383 // Refer to generate_conjoint_copy for a list of prereqs and features.
1384 unsigned int start_off = __ offset(); // Remember stub start address (is rtn value).
1385 unsigned int size = UseCompressedOops ? 4 : 8;
1386 unsigned int shift = UseCompressedOops ? 2 : 3;
1387
1388 address nooverlap_target = aligned ? StubRoutines::arrayof_oop_disjoint_arraycopy(dest_uninitialized)
1389 : StubRoutines::oop_disjoint_arraycopy(dest_uninitialized);
1390
1391 // Branch to disjoint_copy (if applicable) before pre_barrier to avoid double pre_barrier.
1392 array_overlap_test(nooverlap_target, shift); // Branch away to nooverlap_target if disjoint.
1393
1394 DecoratorSet decorators = 0;
1395 if (dest_uninitialized) {
1396 decorators |= AS_DEST_NOT_INITIALIZED;
1397 }
1398 if (aligned) {
1399 decorators |= ARRAYCOPY_ALIGNED;
1400 }
1401
1402 BarrierSetAssembler *bs = Universe::heap()->barrier_set()->barrier_set_assembler();
1403 bs->arraycopy_prologue(_masm, decorators, T_OBJECT, Z_ARG1, Z_ARG2, Z_ARG3);
1404
1405 generate_conjoint_copy(aligned, size, true); // Must preserve ARG2, ARG3.
1406
1407 bs->arraycopy_epilogue(_masm, decorators, T_OBJECT, Z_ARG2, Z_ARG3, true);
1408
1409 return __ addr_at(start_off);
1410 }
1411
1412
1413 void generate_arraycopy_stubs() {
1414
1415 // Note: the disjoint stubs must be generated first, some of
1416 // the conjoint stubs use them.
1417 StubRoutines::_jbyte_disjoint_arraycopy = generate_disjoint_byte_copy (false, "jbyte_disjoint_arraycopy");
1418 StubRoutines::_jshort_disjoint_arraycopy = generate_disjoint_short_copy(false, "jshort_disjoint_arraycopy");
1419 StubRoutines::_jint_disjoint_arraycopy = generate_disjoint_int_copy (false, "jint_disjoint_arraycopy");
1420 StubRoutines::_jlong_disjoint_arraycopy = generate_disjoint_long_copy (false, "jlong_disjoint_arraycopy");
1421 StubRoutines::_oop_disjoint_arraycopy = generate_disjoint_oop_copy (false, "oop_disjoint_arraycopy", false);
1422 StubRoutines::_oop_disjoint_arraycopy_uninit = generate_disjoint_oop_copy (false, "oop_disjoint_arraycopy_uninit", true);
1423
1424 StubRoutines::_arrayof_jbyte_disjoint_arraycopy = generate_disjoint_byte_copy (true, "arrayof_jbyte_disjoint_arraycopy");
1425 StubRoutines::_arrayof_jshort_disjoint_arraycopy = generate_disjoint_short_copy(true, "arrayof_jshort_disjoint_arraycopy");
1426 StubRoutines::_arrayof_jint_disjoint_arraycopy = generate_disjoint_int_copy (true, "arrayof_jint_disjoint_arraycopy");
|