src/cpu/sparc/vm/stubGenerator_sparc.cpp
Index Unified diffs Context diffs Sdiffs Patch New Old Previous File Next File g1-bulk-zeroing-reduction Cdiff src/cpu/sparc/vm/stubGenerator_sparc.cpp

src/cpu/sparc/vm/stubGenerator_sparc.cpp

Print this page

        

*** 1031,1046 **** // count - register containing element count // tmp - scratch register // // The input registers are overwritten. // ! void gen_write_ref_array_pre_barrier(Register addr, Register count) { BarrierSet* bs = Universe::heap()->barrier_set(); ! if (bs->has_write_ref_pre_barrier()) { ! assert(bs->has_write_ref_array_pre_opt(), ! "Else unsupported barrier set."); ! __ save_frame(0); // Save the necessary global regs... will be used after. if (addr->is_global()) { __ mov(addr, L0); } --- 1031,1047 ---- // count - register containing element count // tmp - scratch register // // The input registers are overwritten. // ! void gen_write_ref_array_pre_barrier(Register addr, Register count, bool dest_uninitialized) { BarrierSet* bs = Universe::heap()->barrier_set(); ! switch (bs->kind()) { ! case BarrierSet::G1SATBCT: ! case BarrierSet::G1SATBCTLogging: ! // With G1, don't generate the call if we statically know that the target is uninitialized ! if (!dest_uninitialized) { __ save_frame(0); // Save the necessary global regs... will be used after. if (addr->is_global()) { __ mov(addr, L0); }
*** 1057,1066 **** --- 1058,1075 ---- if (count->is_global()) { __ mov(L1, count); } __ restore(); } + break; + case BarrierSet::CardTableModRef: + case BarrierSet::CardTableExtension: + case BarrierSet::ModRef: + break; + default: + ShouldNotReachHere(); + } } // // Generate post-write barrier for array. // // Input:
*** 2404,2414 **** address *entry, const char *name) { __ align(CodeEntryAlignment); StubCodeMark mark(this, "StubRoutines", name); address start = __ pc(); ! assert(!aligned, "usage"); assert_clean_int(O2, O3); // Make sure 'count' is clean int. if (entry != NULL) { *entry = __ pc(); --- 2413,2423 ---- address *entry, const char *name) { __ align(CodeEntryAlignment); StubCodeMark mark(this, "StubRoutines", name); address start = __ pc(); ! assert(aligned, "Should always be aligned"); assert_clean_int(O2, O3); // Make sure 'count' is clean int. if (entry != NULL) { *entry = __ pc();
*** 2433,2443 **** // Arguments for generated stub: // from: O0 // to: O1 // count: O2 treated as signed // ! address generate_disjoint_oop_copy(bool aligned, address *entry, const char *name) { const Register from = O0; // source array address const Register to = O1; // destination array address const Register count = O2; // elements count --- 2442,2453 ---- // Arguments for generated stub: // from: O0 // to: O1 // count: O2 treated as signed // ! address generate_disjoint_oop_copy(bool aligned, address *entry, const char *name, ! bool dest_uninitialized = false) { const Register from = O0; // source array address const Register to = O1; // destination array address const Register count = O2; // elements count
*** 2454,2464 **** } // save arguments for barrier generation __ mov(to, G1); __ mov(count, G5); ! gen_write_ref_array_pre_barrier(G1, G5); #ifdef _LP64 assert_clean_int(count, O3); // Make sure 'count' is clean int. if (UseCompressedOops) { generate_disjoint_int_copy_core(aligned); } else { --- 2464,2474 ---- } // save arguments for barrier generation __ mov(to, G1); __ mov(count, G5); ! gen_write_ref_array_pre_barrier(G1, G5, dest_uninitialized); #ifdef _LP64 assert_clean_int(count, O3); // Make sure 'count' is clean int. if (UseCompressedOops) { generate_disjoint_int_copy_core(aligned); } else {
*** 2484,2494 **** // from: O0 // to: O1 // count: O2 treated as signed // address generate_conjoint_oop_copy(bool aligned, address nooverlap_target, ! address *entry, const char *name) { const Register from = O0; // source array address const Register to = O1; // destination array address const Register count = O2; // elements count --- 2494,2505 ---- // from: O0 // to: O1 // count: O2 treated as signed // address generate_conjoint_oop_copy(bool aligned, address nooverlap_target, ! address *entry, const char *name, ! bool dest_uninitialized = false) { const Register from = O0; // source array address const Register to = O1; // destination array address const Register count = O2; // elements count
*** 2507,2517 **** array_overlap_test(nooverlap_target, LogBytesPerHeapOop); // save arguments for barrier generation __ mov(to, G1); __ mov(count, G5); ! gen_write_ref_array_pre_barrier(G1, G5); #ifdef _LP64 if (UseCompressedOops) { generate_conjoint_int_copy_core(aligned); } else { --- 2518,2528 ---- array_overlap_test(nooverlap_target, LogBytesPerHeapOop); // save arguments for barrier generation __ mov(to, G1); __ mov(count, G5); ! gen_write_ref_array_pre_barrier(G1, G5, dest_uninitialized); #ifdef _LP64 if (UseCompressedOops) { generate_conjoint_int_copy_core(aligned); } else {
*** 2576,2586 **** // count: O2 treated as signed // ckoff: O3 (super_check_offset) // ckval: O4 (super_klass) // ret: O0 zero for success; (-1^K) where K is partial transfer count // ! address generate_checkcast_copy(const char *name, address *entry) { const Register O0_from = O0; // source array address const Register O1_to = O1; // destination array address const Register O2_count = O2; // elements count const Register O3_ckoff = O3; // super_check_offset --- 2587,2597 ---- // count: O2 treated as signed // ckoff: O3 (super_check_offset) // ckval: O4 (super_klass) // ret: O0 zero for success; (-1^K) where K is partial transfer count // ! address generate_checkcast_copy(const char *name, address *entry, bool dest_uninitialized = false) { const Register O0_from = O0; // source array address const Register O1_to = O1; // destination array address const Register O2_count = O2; // elements count const Register O3_ckoff = O3; // super_check_offset
*** 2622,2633 **** if (entry != NULL) { *entry = __ pc(); // caller can pass a 64-bit byte count here (from generic stub) BLOCK_COMMENT("Entry:"); } ! ! gen_write_ref_array_pre_barrier(O1_to, O2_count); Label load_element, store_element, do_card_marks, fail, done; __ addcc(O2_count, 0, G1_remain); // initialize loop index, and test it __ brx(Assembler::notZero, false, Assembler::pt, load_element); __ delayed()->mov(G0, O5_offset); // offset from start of arrays --- 2633,2643 ---- if (entry != NULL) { *entry = __ pc(); // caller can pass a 64-bit byte count here (from generic stub) BLOCK_COMMENT("Entry:"); } ! gen_write_ref_array_pre_barrier(O1_to, O2_count, dest_uninitialized); Label load_element, store_element, do_card_marks, fail, done; __ addcc(O2_count, 0, G1_remain); // initialize loop index, and test it __ brx(Assembler::notZero, false, Assembler::pt, load_element); __ delayed()->mov(G0, O5_offset); // offset from start of arrays
*** 3081,3140 **** address entry_jint_arraycopy; address entry_oop_arraycopy; address entry_jlong_arraycopy; address entry_checkcast_arraycopy; StubRoutines::_jbyte_disjoint_arraycopy = generate_disjoint_byte_copy(false, &entry, "jbyte_disjoint_arraycopy"); ! StubRoutines::_jbyte_arraycopy = generate_conjoint_byte_copy(false, entry, &entry_jbyte_arraycopy, "jbyte_arraycopy"); - StubRoutines::_jshort_disjoint_arraycopy = generate_disjoint_short_copy(false, &entry, - "jshort_disjoint_arraycopy"); - StubRoutines::_jshort_arraycopy = generate_conjoint_short_copy(false, entry, &entry_jshort_arraycopy, - "jshort_arraycopy"); - StubRoutines::_jint_disjoint_arraycopy = generate_disjoint_int_copy(false, &entry, - "jint_disjoint_arraycopy"); - StubRoutines::_jint_arraycopy = generate_conjoint_int_copy(false, entry, &entry_jint_arraycopy, - "jint_arraycopy"); - StubRoutines::_jlong_disjoint_arraycopy = generate_disjoint_long_copy(false, &entry, - "jlong_disjoint_arraycopy"); - StubRoutines::_jlong_arraycopy = generate_conjoint_long_copy(false, entry, &entry_jlong_arraycopy, - "jlong_arraycopy"); - StubRoutines::_oop_disjoint_arraycopy = generate_disjoint_oop_copy(false, &entry, - "oop_disjoint_arraycopy"); - StubRoutines::_oop_arraycopy = generate_conjoint_oop_copy(false, entry, &entry_oop_arraycopy, - "oop_arraycopy"); - - StubRoutines::_arrayof_jbyte_disjoint_arraycopy = generate_disjoint_byte_copy(true, &entry, "arrayof_jbyte_disjoint_arraycopy"); StubRoutines::_arrayof_jbyte_arraycopy = generate_conjoint_byte_copy(true, entry, NULL, "arrayof_jbyte_arraycopy"); StubRoutines::_arrayof_jshort_disjoint_arraycopy = generate_disjoint_short_copy(true, &entry, "arrayof_jshort_disjoint_arraycopy"); StubRoutines::_arrayof_jshort_arraycopy = generate_conjoint_short_copy(true, entry, NULL, "arrayof_jshort_arraycopy"); StubRoutines::_arrayof_jint_disjoint_arraycopy = generate_disjoint_int_copy(true, &entry, "arrayof_jint_disjoint_arraycopy"); #ifdef _LP64 ! 
// since sizeof(jint) < sizeof(HeapWord), there's a different flavor: ! StubRoutines::_arrayof_jint_arraycopy = generate_conjoint_int_copy(true, entry, NULL, "arrayof_jint_arraycopy"); ! #else ! StubRoutines::_arrayof_jint_arraycopy = StubRoutines::_jint_arraycopy; #endif ! StubRoutines::_arrayof_jlong_disjoint_arraycopy = generate_disjoint_long_copy(true, NULL, "arrayof_jlong_disjoint_arraycopy"); ! StubRoutines::_arrayof_oop_disjoint_arraycopy = generate_disjoint_oop_copy(true, NULL, ! "arrayof_oop_disjoint_arraycopy"); ! StubRoutines::_arrayof_jlong_arraycopy = StubRoutines::_jlong_arraycopy; ! StubRoutines::_arrayof_oop_arraycopy = StubRoutines::_oop_arraycopy; StubRoutines::_checkcast_arraycopy = generate_checkcast_copy("checkcast_arraycopy", &entry_checkcast_arraycopy); StubRoutines::_unsafe_arraycopy = generate_unsafe_copy("unsafe_arraycopy", entry_jbyte_arraycopy, entry_jshort_arraycopy, entry_jint_arraycopy, entry_jlong_arraycopy); --- 3091,3198 ---- address entry_jint_arraycopy; address entry_oop_arraycopy; address entry_jlong_arraycopy; address entry_checkcast_arraycopy; + //*** jbyte + // Always need aligned and unaligned versions StubRoutines::_jbyte_disjoint_arraycopy = generate_disjoint_byte_copy(false, &entry, "jbyte_disjoint_arraycopy"); ! StubRoutines::_jbyte_arraycopy = generate_conjoint_byte_copy(false, entry, ! 
&entry_jbyte_arraycopy, "jbyte_arraycopy"); StubRoutines::_arrayof_jbyte_disjoint_arraycopy = generate_disjoint_byte_copy(true, &entry, "arrayof_jbyte_disjoint_arraycopy"); StubRoutines::_arrayof_jbyte_arraycopy = generate_conjoint_byte_copy(true, entry, NULL, "arrayof_jbyte_arraycopy"); + //*** jshort + // Always need aligned and unaligned versions + StubRoutines::_jshort_disjoint_arraycopy = generate_disjoint_short_copy(false, &entry, + "jshort_disjoint_arraycopy"); + StubRoutines::_jshort_arraycopy = generate_conjoint_short_copy(false, entry, + &entry_jshort_arraycopy, + "jshort_arraycopy"); StubRoutines::_arrayof_jshort_disjoint_arraycopy = generate_disjoint_short_copy(true, &entry, "arrayof_jshort_disjoint_arraycopy"); StubRoutines::_arrayof_jshort_arraycopy = generate_conjoint_short_copy(true, entry, NULL, "arrayof_jshort_arraycopy"); + //*** jint + // Aligned versions StubRoutines::_arrayof_jint_disjoint_arraycopy = generate_disjoint_int_copy(true, &entry, "arrayof_jint_disjoint_arraycopy"); + StubRoutines::_arrayof_jint_arraycopy = generate_conjoint_int_copy(true, entry, &entry_jint_arraycopy, + "arrayof_jint_arraycopy"); #ifdef _LP64 ! // In 64 bit we need both aligned and unaligned versions of jint arraycopy. ! // entry_jint_arraycopy always points to the unaligned version (notice that we overwrite it). ! StubRoutines::_jint_disjoint_arraycopy = generate_disjoint_int_copy(false, &entry, ! "jint_disjoint_arraycopy"); ! StubRoutines::_jint_arraycopy = generate_conjoint_int_copy(false, entry, ! &entry_jint_arraycopy, ! "jint_arraycopy"); ! #else ! // In 32 bit jints are always HeapWordSize aligned, so always use the aligned version ! // (in fact in 32bit we always have a pre-loop part even in the aligned version, ! // because it uses 64-bit loads/stores, so the aligned flag is actually ignored). ! StubRoutines::_jint_disjoint_arraycopy = StubRoutines::_arrayof_jint_disjoint_arraycopy; ! 
StubRoutines::_jint_arraycopy = StubRoutines::_arrayof_jint_arraycopy; #endif ! ! //*** jlong ! // It is always aligned ! StubRoutines::_arrayof_jlong_disjoint_arraycopy = generate_disjoint_long_copy(true, &entry, "arrayof_jlong_disjoint_arraycopy"); ! StubRoutines::_arrayof_jlong_arraycopy = generate_conjoint_long_copy(true, entry, &entry_jlong_arraycopy, ! "arrayof_jlong_arraycopy"); ! StubRoutines::_jlong_disjoint_arraycopy = StubRoutines::_arrayof_jlong_disjoint_arraycopy; ! StubRoutines::_jlong_arraycopy = StubRoutines::_arrayof_jlong_arraycopy; ! ! //*** oops ! // Aligned versions ! StubRoutines::_arrayof_oop_disjoint_arraycopy = generate_disjoint_oop_copy(true, &entry, ! "arrayof_oop_disjoint_arraycopy"); ! StubRoutines::_arrayof_oop_arraycopy = generate_conjoint_oop_copy(true, entry, &entry_oop_arraycopy, ! "arrayof_oop_arraycopy"); ! // Aligned versions without pre-barriers ! StubRoutines::_arrayof_oop_disjoint_arraycopy_uninit = generate_disjoint_oop_copy(true, &entry, ! "arrayof_oop_disjoint_arraycopy_uninit", ! /*dest_uninitialized*/true); ! StubRoutines::_arrayof_oop_arraycopy_uninit = generate_conjoint_oop_copy(true, entry, NULL, ! "arrayof_oop_arraycopy_uninit", ! /*dest_uninitialized*/true); ! #ifdef _LP64 ! if (UseCompressedOops) { ! // With compressed oops we need unaligned versions, notice that we overwrite entry_oop_arraycopy. ! StubRoutines::_oop_disjoint_arraycopy = generate_disjoint_oop_copy(false, &entry, ! "oop_disjoint_arraycopy"); ! StubRoutines::_oop_arraycopy = generate_conjoint_oop_copy(false, entry, &entry_oop_arraycopy, ! "oop_arraycopy"); ! // Unaligned versions without pre-barriers ! StubRoutines::_oop_disjoint_arraycopy_uninit = generate_disjoint_oop_copy(false, &entry, ! "oop_disjoint_arraycopy_uninit", ! /*dest_uninitialized*/true); ! StubRoutines::_oop_arraycopy_uninit = generate_conjoint_oop_copy(false, entry, NULL, ! "oop_arraycopy_uninit", ! /*dest_uninitialized*/true); ! } else ! #endif ! { ! 
// oop arraycopy is always aligned on 32bit and 64bit without compressed oops ! StubRoutines::_oop_disjoint_arraycopy = StubRoutines::_arrayof_oop_disjoint_arraycopy; ! StubRoutines::_oop_arraycopy = StubRoutines::_arrayof_oop_arraycopy; ! StubRoutines::_oop_disjoint_arraycopy_uninit = StubRoutines::_arrayof_oop_disjoint_arraycopy_uninit; ! StubRoutines::_oop_arraycopy_uninit = StubRoutines::_arrayof_oop_arraycopy_uninit; ! } StubRoutines::_checkcast_arraycopy = generate_checkcast_copy("checkcast_arraycopy", &entry_checkcast_arraycopy); + StubRoutines::_checkcast_arraycopy_uninit = generate_checkcast_copy("checkcast_arraycopy_uninit", NULL, + /*dest_uninitialized*/true); + StubRoutines::_unsafe_arraycopy = generate_unsafe_copy("unsafe_arraycopy", entry_jbyte_arraycopy, entry_jshort_arraycopy, entry_jint_arraycopy, entry_jlong_arraycopy);
src/cpu/sparc/vm/stubGenerator_sparc.cpp
Index Unified diffs Context diffs Sdiffs Patch New Old Previous File Next File