7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "precompiled.hpp"
26 #include "asm/macroAssembler.inline.hpp"
27 #include "gc/shared/cardTable.hpp"
28 #include "gc/shared/cardTableBarrierSet.hpp"
29 #include "interpreter/interpreter.hpp"
30 #include "nativeInst_ppc.hpp"
31 #include "oops/instanceOop.hpp"
32 #include "oops/method.hpp"
33 #include "oops/objArrayKlass.hpp"
34 #include "oops/oop.inline.hpp"
35 #include "prims/methodHandles.hpp"
36 #include "runtime/frame.inline.hpp"
37 #include "runtime/handles.inline.hpp"
38 #include "runtime/sharedRuntime.hpp"
39 #include "runtime/stubCodeGenerator.hpp"
40 #include "runtime/stubRoutines.hpp"
41 #include "runtime/thread.inline.hpp"
42 #include "utilities/align.hpp"
43
44 // Declaration and definition of StubGenerator (no .hpp file).
45 // For a more detailed description of the stub routine structure
46 // see the comment in stubRoutines.hpp.
47
|
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "precompiled.hpp"
26 #include "asm/macroAssembler.inline.hpp"
27 #include "gc/shared/barrierSet.hpp"
28 #include "gc/shared/barrierSetAssembler.hpp"
29 #include "interpreter/interpreter.hpp"
30 #include "nativeInst_ppc.hpp"
31 #include "oops/instanceOop.hpp"
32 #include "oops/method.hpp"
33 #include "oops/objArrayKlass.hpp"
34 #include "oops/oop.inline.hpp"
35 #include "prims/methodHandles.hpp"
36 #include "runtime/frame.inline.hpp"
37 #include "runtime/handles.inline.hpp"
38 #include "runtime/sharedRuntime.hpp"
39 #include "runtime/stubCodeGenerator.hpp"
40 #include "runtime/stubRoutines.hpp"
41 #include "runtime/thread.inline.hpp"
42 #include "utilities/align.hpp"
43
44 // Declaration and definition of StubGenerator (no .hpp file).
45 // For a more detailed description of the stub routine structure
46 // see the comment in stubRoutines.hpp.
47
|
594 __ pop_frame();
595
596 __ restore_LR_CR(R11_scratch1);
597
598 __ load_const(R11_scratch1, StubRoutines::forward_exception_entry());
599 __ mtctr(R11_scratch1);
600 __ bctr();
601
602 // Create runtime stub with OopMap.
603 RuntimeStub* stub =
604 RuntimeStub::new_runtime_stub(name, &code,
605 /*frame_complete=*/ (int)(frame_complete_pc - start),
606 frame_size_in_bytes/wordSize,
607 oop_maps,
608 false);
609 return stub->entry_point();
610 }
611 #undef __
612 #define __ _masm->
613
// Generate G1 pre-write barrier for array.
//
// Input:
//   from               - register containing src address (only needed for spilling)
//   to                 - register containing starting address
//   count              - register containing element count
//   dest_uninitialized - if true the destination is statically known to be
//                        uninitialized, so the SATB pre-barrier can be skipped
//   Rtmp1              - scratch register
//   preserve1/2        - optional registers to save/restore around the runtime call
//
// Kills:
//   nothing
//
void gen_write_ref_array_pre_barrier(Register from, Register to, Register count, bool dest_uninitialized, Register Rtmp1,
                                     Register preserve1 = noreg, Register preserve2 = noreg) {
  BarrierSet* const bs = Universe::heap()->barrier_set();
  switch (bs->kind()) {
    case BarrierSet::G1BarrierSet:
      // With G1, don't generate the call if we statically know that the target is uninitialized
      if (!dest_uninitialized) {
        // Three mandatory spill slots (from, to, count) plus one per extra register to preserve.
        int spill_slots = 3;
        if (preserve1 != noreg) { spill_slots++; }
        if (preserve2 != noreg) { spill_slots++; }
        const int frame_size = align_up(frame::abi_reg_args_size + spill_slots * BytesPerWord, frame::alignment_in_bytes);
        Label filtered;

        // Is marking active? Load the SATB "active" flag with the width it is declared with.
        if (in_bytes(SATBMarkQueue::byte_width_of_active()) == 4) {
          __ lwz(Rtmp1, in_bytes(JavaThread::satb_mark_queue_offset() + SATBMarkQueue::byte_offset_of_active()), R16_thread);
        } else {
          guarantee(in_bytes(SATBMarkQueue::byte_width_of_active()) == 1, "Assumption");
          __ lbz(Rtmp1, in_bytes(JavaThread::satb_mark_queue_offset() + SATBMarkQueue::byte_offset_of_active()), R16_thread);
        }
        // Skip the barrier call entirely when concurrent marking is not active.
        __ cmpdi(CCR0, Rtmp1, 0);
        __ beq(CCR0, filtered);

        // Push a frame and spill the live registers before calling into the VM.
        __ save_LR_CR(R0);
        __ push_frame(frame_size, R0);
        int slot_nr = 0;
        __ std(from, frame_size - (++slot_nr) * wordSize, R1_SP);
        __ std(to, frame_size - (++slot_nr) * wordSize, R1_SP);
        __ std(count, frame_size - (++slot_nr) * wordSize, R1_SP);
        if (preserve1 != noreg) { __ std(preserve1, frame_size - (++slot_nr) * wordSize, R1_SP); }
        if (preserve2 != noreg) { __ std(preserve2, frame_size - (++slot_nr) * wordSize, R1_SP); }

        __ call_VM_leaf(CAST_FROM_FN_PTR(address, BarrierSet::static_write_ref_array_pre), to, count);

        // Restore the spilled registers in the same slot order and pop the frame.
        slot_nr = 0;
        __ ld(from, frame_size - (++slot_nr) * wordSize, R1_SP);
        __ ld(to, frame_size - (++slot_nr) * wordSize, R1_SP);
        __ ld(count, frame_size - (++slot_nr) * wordSize, R1_SP);
        if (preserve1 != noreg) { __ ld(preserve1, frame_size - (++slot_nr) * wordSize, R1_SP); }
        if (preserve2 != noreg) { __ ld(preserve2, frame_size - (++slot_nr) * wordSize, R1_SP); }
        __ addi(R1_SP, R1_SP, frame_size); // pop_frame()
        __ restore_LR_CR(R0);

        __ bind(filtered);
      }
      break;
    case BarrierSet::CardTableBarrierSet:
      // Card-table collectors need no pre-barrier for array stores.
      break;
    default:
      ShouldNotReachHere();
  }
}
677
// Generate CMS/G1 post-write barrier for array.
//
// Input:
//   addr     - register containing starting address
//   count    - register containing element count
//   tmp      - scratch register
//   preserve - optional register to save/restore around the G1 runtime call
//
// The input registers and R0 are overwritten.
//
void gen_write_ref_array_post_barrier(Register addr, Register count, Register tmp, Register preserve = noreg) {
  BarrierSet* const bs = Universe::heap()->barrier_set();

  switch (bs->kind()) {
    case BarrierSet::G1BarrierSet:
      {
        // G1 post-barrier is a runtime call; spill 'preserve' (if any) across it.
        int spill_slots = (preserve != noreg) ? 1 : 0;
        const int frame_size = align_up(frame::abi_reg_args_size + spill_slots * BytesPerWord, frame::alignment_in_bytes);

        __ save_LR_CR(R0);
        __ push_frame(frame_size, R0);
        if (preserve != noreg) { __ std(preserve, frame_size - 1 * wordSize, R1_SP); }
        __ call_VM_leaf(CAST_FROM_FN_PTR(address, BarrierSet::static_write_ref_array_post), addr, count);
        if (preserve != noreg) { __ ld(preserve, frame_size - 1 * wordSize, R1_SP); }
        __ addi(R1_SP, R1_SP, frame_size); // pop_frame();
        __ restore_LR_CR(R0);
      }
      break;
    case BarrierSet::CardTableBarrierSet:
      {
        // Dirty the card-table entries covering [addr, addr + count * BytesPerHeapOop).
        Label Lskip_loop, Lstore_loop;
        if (UseConcMarkSweepGC) {
          // TODO PPC port: contribute optimization / requires shared changes
          __ release();
        }

        CardTableBarrierSet* const ctbs = barrier_set_cast<CardTableBarrierSet>(bs);
        CardTable* const ct = ctbs->card_table();
        assert(sizeof(*ct->byte_map_base()) == sizeof(jbyte), "adjust this code");
        assert_different_registers(addr, count, tmp);

        // count := address of last oop in the range (inclusive).
        __ sldi(count, count, LogBytesPerHeapOop);
        __ addi(count, count, -BytesPerHeapOop);
        __ add(count, addr, count);
        // Use two shifts to clear out those low order two bits! (Cannot opt. into 1.)
        __ srdi(addr, addr, CardTable::card_shift);
        __ srdi(count, count, CardTable::card_shift);
        // count := number of cards to dirty, minus one.
        __ subf(count, addr, count);
        assert_different_registers(R0, addr, count, tmp);
        __ load_const(tmp, (address)ct->byte_map_base());
        __ addic_(count, count, 1);
        __ beq(CCR0, Lskip_loop); // Nothing to do (count was -1 before the increment).
        __ li(R0, 0);             // Dirty-card value to store.
        __ mtctr(count);
        // Byte store loop
        __ bind(Lstore_loop);
        __ stbx(R0, tmp, addr);
        __ addi(addr, addr, 1);
        __ bdnz(Lstore_loop);
        __ bind(Lskip_loop);
      }
      break;
    case BarrierSet::ModRef:
      // Plain mod-ref barrier set: nothing to do after the copy.
      break;
    default:
      ShouldNotReachHere();
  }
}
745
746 // Support for void zero_words_aligned8(HeapWord* to, size_t count)
747 //
748 // Arguments:
749 // to:
750 // count:
751 //
752 // Destroys:
753 //
754 address generate_zero_words_aligned8() {
755 StubCodeMark mark(this, "StubRoutines", "zero_words_aligned8");
756
757 // Implemented as in ClearArray.
758 address start = __ function_entry();
759
760 Register base_ptr_reg = R3_ARG1; // tohw (needs to be 8b aligned)
761 Register cnt_dwords_reg = R4_ARG2; // count (in dwords)
762 Register tmp1_reg = R5_ARG3;
763 Register tmp2_reg = R6_ARG4;
|
594 __ pop_frame();
595
596 __ restore_LR_CR(R11_scratch1);
597
598 __ load_const(R11_scratch1, StubRoutines::forward_exception_entry());
599 __ mtctr(R11_scratch1);
600 __ bctr();
601
602 // Create runtime stub with OopMap.
603 RuntimeStub* stub =
604 RuntimeStub::new_runtime_stub(name, &code,
605 /*frame_complete=*/ (int)(frame_complete_pc - start),
606 frame_size_in_bytes/wordSize,
607 oop_maps,
608 false);
609 return stub->entry_point();
610 }
611 #undef __
612 #define __ _masm->
613
614
615 // Support for void zero_words_aligned8(HeapWord* to, size_t count)
616 //
617 // Arguments:
618 // to:
619 // count:
620 //
621 // Destroys:
622 //
623 address generate_zero_words_aligned8() {
624 StubCodeMark mark(this, "StubRoutines", "zero_words_aligned8");
625
626 // Implemented as in ClearArray.
627 address start = __ function_entry();
628
629 Register base_ptr_reg = R3_ARG1; // tohw (needs to be 8b aligned)
630 Register cnt_dwords_reg = R4_ARG2; // count (in dwords)
631 Register tmp1_reg = R5_ARG3;
632 Register tmp2_reg = R6_ARG4;
|
2137 }
2138
// Generate stub for conjoint oop copy.  If "aligned" is true, the
// "from" and "to" addresses are assumed to be heapword aligned.
// Ranges may overlap, so an overlap test dispatches to the disjoint
// variant when copying forward is safe.
//
// Arguments for generated stub:
//   from:  R3_ARG1
//   to:    R4_ARG2
//   count: R5_ARG3 treated as signed
//   dest_uninitialized: G1 support
//
address generate_conjoint_oop_copy(bool aligned, const char * name, bool dest_uninitialized) {
  StubCodeMark mark(this, "StubRoutines", name);

  address start = __ function_entry();
  assert_positive_int(R5_ARG3);
  // Target to branch to when the regions do not overlap (forward copy is fine).
  address nooverlap_target = aligned ?
    STUB_ENTRY(arrayof_oop_disjoint_arraycopy) :
    STUB_ENTRY(oop_disjoint_arraycopy);

  gen_write_ref_array_pre_barrier(R3_ARG1, R4_ARG2, R5_ARG3, dest_uninitialized, R9_ARG7);

  // Save arguments.
  // R4/R5 are clobbered by the copy cores; keep copies for the post barrier.
  __ mr(R9_ARG7, R4_ARG2);
  __ mr(R10_ARG8, R5_ARG3);

  if (UseCompressedOops) {
    // Compressed oops are 4 bytes: reuse the int copy core (shift = 2).
    array_overlap_test(nooverlap_target, 2);
    generate_conjoint_int_copy_core(aligned);
  } else {
    // Uncompressed oops are 8 bytes: reuse the long copy core (shift = 3).
    array_overlap_test(nooverlap_target, 3);
    generate_conjoint_long_copy_core(aligned);
  }

  gen_write_ref_array_post_barrier(R9_ARG7, R10_ARG8, R11_scratch1);
  __ li(R3_RET, 0); // return 0
  __ blr();
  return start;
}
2176
// Generate stub for disjoint oop copy.  If "aligned" is true, the
// "from" and "to" addresses are assumed to be heapword aligned.
// Ranges are guaranteed not to overlap, so no overlap test is emitted.
//
// Arguments for generated stub:
//   from:  R3_ARG1
//   to:    R4_ARG2
//   count: R5_ARG3 treated as signed
//   dest_uninitialized: G1 support
//
address generate_disjoint_oop_copy(bool aligned, const char * name, bool dest_uninitialized) {
  StubCodeMark mark(this, "StubRoutines", name);
  address start = __ function_entry();
  assert_positive_int(R5_ARG3);
  gen_write_ref_array_pre_barrier(R3_ARG1, R4_ARG2, R5_ARG3, dest_uninitialized, R9_ARG7);

  // save some arguments, disjoint_long_copy_core destroys them.
  // needed for post barrier
  __ mr(R9_ARG7, R4_ARG2);
  __ mr(R10_ARG8, R5_ARG3);

  if (UseCompressedOops) {
    // Compressed oops are int-sized; reuse the int copy core.
    generate_disjoint_int_copy_core(aligned);
  } else {
    // Uncompressed oops are long-sized; reuse the long copy core.
    generate_disjoint_long_copy_core(aligned);
  }

  gen_write_ref_array_post_barrier(R9_ARG7, R10_ARG8, R11_scratch1);
  __ li(R3_RET, 0); // return 0
  __ blr();

  return start;
}
2209
2210
2211 // Helper for generating a dynamic type check.
2212 // Smashes only the given temp registers.
2213 void generate_type_check(Register sub_klass,
2214 Register super_check_offset,
2215 Register super_klass,
2216 Register temp,
2217 Label& L_success) {
2218 assert_different_registers(sub_klass, super_check_offset, super_klass);
2219
2220 BLOCK_COMMENT("type_check:");
2221
2222 Label L_miss;
|
2006 }
2007
// Generate stub for conjoint oop copy.  If "aligned" is true, the
// "from" and "to" addresses are assumed to be heapword aligned.
// GC barriers are emitted via the active BarrierSetAssembler, driven
// by the DecoratorSet built from the stub's properties.
//
// Arguments for generated stub:
//   from:  R3_ARG1
//   to:    R4_ARG2
//   count: R5_ARG3 treated as signed
//   dest_uninitialized: G1 support
//
address generate_conjoint_oop_copy(bool aligned, const char * name, bool dest_uninitialized) {
  StubCodeMark mark(this, "StubRoutines", name);

  address start = __ function_entry();
  assert_positive_int(R5_ARG3);
  // Target to branch to when the regions do not overlap (forward copy is fine).
  address nooverlap_target = aligned ?
    STUB_ENTRY(arrayof_oop_disjoint_arraycopy) :
    STUB_ENTRY(oop_disjoint_arraycopy);

  // Describe this copy to the barrier set so it can emit the right barriers.
  DecoratorSet decorators = 0;
  if (dest_uninitialized) {
    decorators |= AS_DEST_NOT_INITIALIZED;
  }
  if (aligned) {
    decorators |= ARRAYCOPY_ALIGNED;
  }

  BarrierSetAssembler *bs = Universe::heap()->barrier_set()->barrier_set_assembler();
  bs->arraycopy_prologue(_masm, decorators, T_OBJECT, R3_ARG1, R4_ARG2, R5_ARG3, noreg, noreg);

  if (UseCompressedOops) {
    // Compressed oops are 4 bytes: reuse the int copy core (shift = 2).
    array_overlap_test(nooverlap_target, 2);
    generate_conjoint_int_copy_core(aligned);
  } else {
    // Uncompressed oops are 8 bytes: reuse the long copy core (shift = 3).
    array_overlap_test(nooverlap_target, 3);
    generate_conjoint_long_copy_core(aligned);
  }

  bs->arraycopy_epilogue(_masm, decorators, T_OBJECT, R4_ARG2, R5_ARG3, noreg);
  __ li(R3_RET, 0); // return 0
  __ blr();
  return start;
}
2050
// Generate stub for disjoint oop copy.  If "aligned" is true, the
// "from" and "to" addresses are assumed to be heapword aligned.
// Ranges are guaranteed not to overlap; GC barriers are emitted via
// the active BarrierSetAssembler.
//
// Arguments for generated stub:
//   from:  R3_ARG1
//   to:    R4_ARG2
//   count: R5_ARG3 treated as signed
//   dest_uninitialized: G1 support
//
address generate_disjoint_oop_copy(bool aligned, const char * name, bool dest_uninitialized) {
  StubCodeMark mark(this, "StubRoutines", name);
  address start = __ function_entry();
  assert_positive_int(R5_ARG3);

  // Describe this copy to the barrier set so it can emit the right barriers.
  DecoratorSet decorators = ARRAYCOPY_DISJOINT;
  if (dest_uninitialized) {
    decorators |= AS_DEST_NOT_INITIALIZED;
  }
  if (aligned) {
    decorators |= ARRAYCOPY_ALIGNED;
  }

  BarrierSetAssembler *bs = Universe::heap()->barrier_set()->barrier_set_assembler();
  bs->arraycopy_prologue(_masm, decorators, T_OBJECT, R3_ARG1, R4_ARG2, R5_ARG3, noreg, noreg);

  if (UseCompressedOops) {
    // Compressed oops are int-sized; reuse the int copy core.
    generate_disjoint_int_copy_core(aligned);
  } else {
    // Uncompressed oops are long-sized; reuse the long copy core.
    generate_disjoint_long_copy_core(aligned);
  }

  bs->arraycopy_epilogue(_masm, decorators, T_OBJECT, R4_ARG2, R5_ARG3, noreg);
  __ li(R3_RET, 0); // return 0
  __ blr();

  return start;
}
2088
2089
2090 // Helper for generating a dynamic type check.
2091 // Smashes only the given temp registers.
2092 void generate_type_check(Register sub_klass,
2093 Register super_check_offset,
2094 Register super_klass,
2095 Register temp,
2096 Label& L_success) {
2097 assert_different_registers(sub_klass, super_check_offset, super_klass);
2098
2099 BLOCK_COMMENT("type_check:");
2100
2101 Label L_miss;
|
2262
2263 // Assert that int is 64 bit sign extended and arrays are not conjoint.
2264 #ifdef ASSERT
2265 {
2266 assert_positive_int(R5_ARG3);
2267 const Register tmp1 = R11_scratch1, tmp2 = R12_scratch2;
2268 Label no_overlap;
2269 __ subf(tmp1, R3_ARG1, R4_ARG2); // distance in bytes
2270 __ sldi(tmp2, R5_ARG3, LogBytesPerHeapOop); // size in bytes
2271 __ cmpld(CCR0, R3_ARG1, R4_ARG2); // Use unsigned comparison!
2272 __ cmpld(CCR1, tmp1, tmp2);
2273 __ crnand(CCR0, Assembler::less, CCR1, Assembler::less);
2274 // Overlaps if Src before dst and distance smaller than size.
2275 // Branch to forward copy routine otherwise.
2276 __ blt(CCR0, no_overlap);
2277 __ stop("overlap in checkcast_copy", 0x9543);
2278 __ bind(no_overlap);
2279 }
2280 #endif
2281
2282 gen_write_ref_array_pre_barrier(R3_from, R4_to, R5_count, dest_uninitialized, R12_tmp, /* preserve: */ R6_ckoff, R7_ckval);
2283
2284 //inc_counter_np(SharedRuntime::_checkcast_array_copy_ctr, R12_tmp, R3_RET);
2285
2286 Label load_element, store_element, store_null, success, do_card_marks;
2287 __ or_(R9_remain, R5_count, R5_count); // Initialize loop index, and test it.
2288 __ li(R8_offset, 0); // Offset from start of arrays.
2289 __ li(R2_minus1, -1);
2290 __ bne(CCR0, load_element);
2291
2292 // Empty array: Nothing to do.
2293 __ li(R3_RET, 0); // Return 0 on (trivial) success.
2294 __ blr();
2295
2296 // ======== begin loop ========
2297 // (Entry is load_element.)
2298 __ align(OptoLoopAlignment);
2299 __ bind(store_element);
2300 if (UseCompressedOops) {
2301 __ encode_heap_oop_not_null(R10_oop);
2302 __ bind(store_null);
2303 __ stw(R10_oop, R8_offset, R4_to);
2304 } else {
2305 __ bind(store_null);
|
2141
2142 // Assert that int is 64 bit sign extended and arrays are not conjoint.
2143 #ifdef ASSERT
2144 {
2145 assert_positive_int(R5_ARG3);
2146 const Register tmp1 = R11_scratch1, tmp2 = R12_scratch2;
2147 Label no_overlap;
2148 __ subf(tmp1, R3_ARG1, R4_ARG2); // distance in bytes
2149 __ sldi(tmp2, R5_ARG3, LogBytesPerHeapOop); // size in bytes
2150 __ cmpld(CCR0, R3_ARG1, R4_ARG2); // Use unsigned comparison!
2151 __ cmpld(CCR1, tmp1, tmp2);
2152 __ crnand(CCR0, Assembler::less, CCR1, Assembler::less);
2153 // Overlaps if Src before dst and distance smaller than size.
2154 // Branch to forward copy routine otherwise.
2155 __ blt(CCR0, no_overlap);
2156 __ stop("overlap in checkcast_copy", 0x9543);
2157 __ bind(no_overlap);
2158 }
2159 #endif
2160
2161 DecoratorSet decorators = ARRAYCOPY_CHECKCAST;
2162 if (dest_uninitialized) {
2163 decorators |= AS_DEST_NOT_INITIALIZED;
2164 }
2165
2166 BarrierSetAssembler *bs = Universe::heap()->barrier_set()->barrier_set_assembler();
2167 bs->arraycopy_prologue(_masm, decorators, T_OBJECT, R3_from, R4_to, R5_count, /* preserve: */ R6_ckoff, R7_ckval);
2168
2169 //inc_counter_np(SharedRuntime::_checkcast_array_copy_ctr, R12_tmp, R3_RET);
2170
2171 Label load_element, store_element, store_null, success, do_epilogue;
2172 __ or_(R9_remain, R5_count, R5_count); // Initialize loop index, and test it.
2173 __ li(R8_offset, 0); // Offset from start of arrays.
2174 __ li(R2_minus1, -1);
2175 __ bne(CCR0, load_element);
2176
2177 // Empty array: Nothing to do.
2178 __ li(R3_RET, 0); // Return 0 on (trivial) success.
2179 __ blr();
2180
2181 // ======== begin loop ========
2182 // (Entry is load_element.)
2183 __ align(OptoLoopAlignment);
2184 __ bind(store_element);
2185 if (UseCompressedOops) {
2186 __ encode_heap_oop_not_null(R10_oop);
2187 __ bind(store_null);
2188 __ stw(R10_oop, R8_offset, R4_to);
2189 } else {
2190 __ bind(store_null);
|
2310 __ add_(R9_remain, R2_minus1, R9_remain); // Decrement the count.
2311 __ beq(CCR0, success);
2312
2313 // ======== loop entry is here ========
2314 __ bind(load_element);
2315 __ load_heap_oop(R10_oop, R8_offset, R3_from, &store_null); // Load the oop.
2316
2317 __ load_klass(R11_klass, R10_oop); // Query the object klass.
2318
2319 generate_type_check(R11_klass, R6_ckoff, R7_ckval, R12_tmp,
2320 // Branch to this on success:
2321 store_element);
2322 // ======== end loop ========
2323
2324 // It was a real error; we must depend on the caller to finish the job.
2325 // Register R9_remain has number of *remaining* oops, R5_count number of *total* oops.
2326 // Emit GC store barriers for the oops we have copied (R5_count minus R9_remain),
2327 // and report their number to the caller.
2328 __ subf_(R5_count, R9_remain, R5_count);
2329 __ nand(R3_RET, R5_count, R5_count); // report (-1^K) to caller
2330 __ bne(CCR0, do_card_marks);
2331 __ blr();
2332
2333 __ bind(success);
2334 __ li(R3_RET, 0);
2335
2336 __ bind(do_card_marks);
2337 // Store check on R4_to[0..R5_count-1].
2338 gen_write_ref_array_post_barrier(R4_to, R5_count, R12_tmp, /* preserve: */ R3_RET);
2339 __ blr();
2340 return start;
2341 }
2342
2343
2344 // Generate 'unsafe' array copy stub.
2345 // Though just as safe as the other stubs, it takes an unscaled
2346 // size_t argument instead of an element count.
2347 //
2348 // Arguments for generated stub:
2349 // from: R3
2350 // to: R4
2351 // count: R5 byte count, treated as ssize_t, can be zero
2352 //
2353 // Examines the alignment of the operands and dispatches
2354 // to a long, int, short, or byte copy loop.
2355 //
2356 address generate_unsafe_copy(const char* name,
2357 address byte_copy_entry,
|
2195 __ add_(R9_remain, R2_minus1, R9_remain); // Decrement the count.
2196 __ beq(CCR0, success);
2197
2198 // ======== loop entry is here ========
2199 __ bind(load_element);
2200 __ load_heap_oop(R10_oop, R8_offset, R3_from, &store_null); // Load the oop.
2201
2202 __ load_klass(R11_klass, R10_oop); // Query the object klass.
2203
2204 generate_type_check(R11_klass, R6_ckoff, R7_ckval, R12_tmp,
2205 // Branch to this on success:
2206 store_element);
2207 // ======== end loop ========
2208
2209 // It was a real error; we must depend on the caller to finish the job.
2210 // Register R9_remain has number of *remaining* oops, R5_count number of *total* oops.
2211 // Emit GC store barriers for the oops we have copied (R5_count minus R9_remain),
2212 // and report their number to the caller.
2213 __ subf_(R5_count, R9_remain, R5_count);
2214 __ nand(R3_RET, R5_count, R5_count); // report (-1^K) to caller
2215 __ bne(CCR0, do_epilogue);
2216 __ blr();
2217
2218 __ bind(success);
2219 __ li(R3_RET, 0);
2220
2221 __ bind(do_epilogue);
2222 bs->arraycopy_epilogue(_masm, decorators, T_OBJECT, R4_to, R5_count, /* preserve */ R3_RET);
2223
2224 __ blr();
2225 return start;
2226 }
2227
2228
2229 // Generate 'unsafe' array copy stub.
2230 // Though just as safe as the other stubs, it takes an unscaled
2231 // size_t argument instead of an element count.
2232 //
2233 // Arguments for generated stub:
2234 // from: R3
2235 // to: R4
2236 // count: R5 byte count, treated as ssize_t, can be zero
2237 //
2238 // Examines the alignment of the operands and dispatches
2239 // to a long, int, short, or byte copy loop.
2240 //
2241 address generate_unsafe_copy(const char* name,
2242 address byte_copy_entry,
|