src/cpu/sparc/vm/stubGenerator_sparc.cpp
Index
Unified diffs
Context diffs
Sdiffs
Patch
New
Old
Previous File
Next File
g1-bulk-zeroing-reduction Cdiff src/cpu/sparc/vm/stubGenerator_sparc.cpp
src/cpu/sparc/vm/stubGenerator_sparc.cpp
Print this page
*** 2404,2414 ****
address *entry, const char *name) {
__ align(CodeEntryAlignment);
StubCodeMark mark(this, "StubRoutines", name);
address start = __ pc();
! assert(!aligned, "usage");
assert_clean_int(O2, O3); // Make sure 'count' is clean int.
if (entry != NULL) {
*entry = __ pc();
--- 2404,2414 ----
address *entry, const char *name) {
__ align(CodeEntryAlignment);
StubCodeMark mark(this, "StubRoutines", name);
address start = __ pc();
! assert(aligned, "Should always be aligned");
assert_clean_int(O2, O3); // Make sure 'count' is clean int.
if (entry != NULL) {
*entry = __ pc();
*** 2433,2443 ****
// Arguments for generated stub:
// from: O0
// to: O1
// count: O2 treated as signed
//
! address generate_disjoint_oop_copy(bool aligned, address *entry, const char *name) {
const Register from = O0; // source array address
const Register to = O1; // destination array address
const Register count = O2; // elements count
--- 2433,2444 ----
// Arguments for generated stub:
// from: O0
// to: O1
// count: O2 treated as signed
//
! address generate_disjoint_oop_copy(bool aligned, address *entry, const char *name,
! bool need_pre_barrier = true) {
const Register from = O0; // source array address
const Register to = O1; // destination array address
const Register count = O2; // elements count
*** 2454,2464 ****
--- 2455,2467 ----
}
// save arguments for barrier generation
__ mov(to, G1);
__ mov(count, G5);
+ if (need_pre_barrier) {
gen_write_ref_array_pre_barrier(G1, G5);
+ }
#ifdef _LP64
assert_clean_int(count, O3); // Make sure 'count' is clean int.
if (UseCompressedOops) {
generate_disjoint_int_copy_core(aligned);
} else {
*** 2484,2494 ****
// from: O0
// to: O1
// count: O2 treated as signed
//
address generate_conjoint_oop_copy(bool aligned, address nooverlap_target,
! address *entry, const char *name) {
const Register from = O0; // source array address
const Register to = O1; // destination array address
const Register count = O2; // elements count
--- 2487,2498 ----
// from: O0
// to: O1
// count: O2 treated as signed
//
address generate_conjoint_oop_copy(bool aligned, address nooverlap_target,
! address *entry, const char *name,
! bool need_pre_barrier = true) {
const Register from = O0; // source array address
const Register to = O1; // destination array address
const Register count = O2; // elements count
*** 2507,2517 ****
--- 2511,2523 ----
array_overlap_test(nooverlap_target, LogBytesPerHeapOop);
// save arguments for barrier generation
__ mov(to, G1);
__ mov(count, G5);
+ if (need_pre_barrier) {
gen_write_ref_array_pre_barrier(G1, G5);
+ }
#ifdef _LP64
if (UseCompressedOops) {
generate_conjoint_int_copy_core(aligned);
} else {
*** 2576,2586 ****
// count: O2 treated as signed
// ckoff: O3 (super_check_offset)
// ckval: O4 (super_klass)
// ret: O0 zero for success; (-1^K) where K is partial transfer count
//
! address generate_checkcast_copy(const char *name, address *entry) {
const Register O0_from = O0; // source array address
const Register O1_to = O1; // destination array address
const Register O2_count = O2; // elements count
const Register O3_ckoff = O3; // super_check_offset
--- 2582,2592 ----
// count: O2 treated as signed
// ckoff: O3 (super_check_offset)
// ckval: O4 (super_klass)
// ret: O0 zero for success; (-1^K) where K is partial transfer count
//
! address generate_checkcast_copy(const char *name, address *entry, bool need_pre_barrier = true) {
const Register O0_from = O0; // source array address
const Register O1_to = O1; // destination array address
const Register O2_count = O2; // elements count
const Register O3_ckoff = O3; // super_check_offset
*** 3081,3140 ****
address entry_jint_arraycopy;
address entry_oop_arraycopy;
address entry_jlong_arraycopy;
address entry_checkcast_arraycopy;
StubRoutines::_jbyte_disjoint_arraycopy = generate_disjoint_byte_copy(false, &entry,
"jbyte_disjoint_arraycopy");
! StubRoutines::_jbyte_arraycopy = generate_conjoint_byte_copy(false, entry, &entry_jbyte_arraycopy,
"jbyte_arraycopy");
- StubRoutines::_jshort_disjoint_arraycopy = generate_disjoint_short_copy(false, &entry,
- "jshort_disjoint_arraycopy");
- StubRoutines::_jshort_arraycopy = generate_conjoint_short_copy(false, entry, &entry_jshort_arraycopy,
- "jshort_arraycopy");
- StubRoutines::_jint_disjoint_arraycopy = generate_disjoint_int_copy(false, &entry,
- "jint_disjoint_arraycopy");
- StubRoutines::_jint_arraycopy = generate_conjoint_int_copy(false, entry, &entry_jint_arraycopy,
- "jint_arraycopy");
- StubRoutines::_jlong_disjoint_arraycopy = generate_disjoint_long_copy(false, &entry,
- "jlong_disjoint_arraycopy");
- StubRoutines::_jlong_arraycopy = generate_conjoint_long_copy(false, entry, &entry_jlong_arraycopy,
- "jlong_arraycopy");
- StubRoutines::_oop_disjoint_arraycopy = generate_disjoint_oop_copy(false, &entry,
- "oop_disjoint_arraycopy");
- StubRoutines::_oop_arraycopy = generate_conjoint_oop_copy(false, entry, &entry_oop_arraycopy,
- "oop_arraycopy");
-
-
StubRoutines::_arrayof_jbyte_disjoint_arraycopy = generate_disjoint_byte_copy(true, &entry,
"arrayof_jbyte_disjoint_arraycopy");
StubRoutines::_arrayof_jbyte_arraycopy = generate_conjoint_byte_copy(true, entry, NULL,
"arrayof_jbyte_arraycopy");
StubRoutines::_arrayof_jshort_disjoint_arraycopy = generate_disjoint_short_copy(true, &entry,
"arrayof_jshort_disjoint_arraycopy");
StubRoutines::_arrayof_jshort_arraycopy = generate_conjoint_short_copy(true, entry, NULL,
"arrayof_jshort_arraycopy");
StubRoutines::_arrayof_jint_disjoint_arraycopy = generate_disjoint_int_copy(true, &entry,
"arrayof_jint_disjoint_arraycopy");
#ifdef _LP64
! // since sizeof(jint) < sizeof(HeapWord), there's a different flavor:
! StubRoutines::_arrayof_jint_arraycopy = generate_conjoint_int_copy(true, entry, NULL, "arrayof_jint_arraycopy");
! #else
! StubRoutines::_arrayof_jint_arraycopy = StubRoutines::_jint_arraycopy;
#endif
! StubRoutines::_arrayof_jlong_disjoint_arraycopy = generate_disjoint_long_copy(true, NULL,
"arrayof_jlong_disjoint_arraycopy");
! StubRoutines::_arrayof_oop_disjoint_arraycopy = generate_disjoint_oop_copy(true, NULL,
! "arrayof_oop_disjoint_arraycopy");
! StubRoutines::_arrayof_jlong_arraycopy = StubRoutines::_jlong_arraycopy;
! StubRoutines::_arrayof_oop_arraycopy = StubRoutines::_oop_arraycopy;
StubRoutines::_checkcast_arraycopy = generate_checkcast_copy("checkcast_arraycopy", &entry_checkcast_arraycopy);
StubRoutines::_unsafe_arraycopy = generate_unsafe_copy("unsafe_arraycopy",
entry_jbyte_arraycopy,
entry_jshort_arraycopy,
entry_jint_arraycopy,
entry_jlong_arraycopy);
--- 3087,3189 ----
address entry_jint_arraycopy;
address entry_oop_arraycopy;
address entry_jlong_arraycopy;
address entry_checkcast_arraycopy;
+ //*** jbyte
+ // Always need aligned and unaligned versions
StubRoutines::_jbyte_disjoint_arraycopy = generate_disjoint_byte_copy(false, &entry,
"jbyte_disjoint_arraycopy");
! StubRoutines::_jbyte_arraycopy = generate_conjoint_byte_copy(false, entry,
! &entry_jbyte_arraycopy,
"jbyte_arraycopy");
StubRoutines::_arrayof_jbyte_disjoint_arraycopy = generate_disjoint_byte_copy(true, &entry,
"arrayof_jbyte_disjoint_arraycopy");
StubRoutines::_arrayof_jbyte_arraycopy = generate_conjoint_byte_copy(true, entry, NULL,
"arrayof_jbyte_arraycopy");
+ //*** jshort
+ // Always need aligned and unaligned versions
+ StubRoutines::_jshort_disjoint_arraycopy = generate_disjoint_short_copy(false, &entry,
+ "jshort_disjoint_arraycopy");
+ StubRoutines::_jshort_arraycopy = generate_conjoint_short_copy(false, entry,
+ &entry_jshort_arraycopy,
+ "jshort_arraycopy");
StubRoutines::_arrayof_jshort_disjoint_arraycopy = generate_disjoint_short_copy(true, &entry,
"arrayof_jshort_disjoint_arraycopy");
StubRoutines::_arrayof_jshort_arraycopy = generate_conjoint_short_copy(true, entry, NULL,
"arrayof_jshort_arraycopy");
+ //*** jint
+ // Aligned versions
StubRoutines::_arrayof_jint_disjoint_arraycopy = generate_disjoint_int_copy(true, &entry,
"arrayof_jint_disjoint_arraycopy");
+ StubRoutines::_arrayof_jint_arraycopy = generate_conjoint_int_copy(true, entry, &entry_jint_arraycopy,
+ "arrayof_jint_arraycopy");
#ifdef _LP64
! // In 64 bit we need both aligned and unaligned versions of jint arraycopy.
! // entry_jint_arraycopy always points to the unaligned version (notice that we overwrite it).
! StubRoutines::_jint_disjoint_arraycopy = generate_disjoint_int_copy(false, &entry,
! "jint_disjoint_arraycopy");
! StubRoutines::_jint_arraycopy = generate_conjoint_int_copy(false, entry,
! &entry_jint_arraycopy,
! "jint_arraycopy");
! #else
! // In 32 bit jints are always HeapWordSize aligned, so always use the aligned version
! // (in fact in 32bit we always have a pre-loop part even in the aligned version,
! // because it uses 64-bit loads/stores, so the aligned flag is actually ignored).
! StubRoutines::_jint_disjoint_arraycopy = StubRoutines::_arrayof_jint_disjoint_arraycopy;
! StubRoutines::_jint_arraycopy = StubRoutines::_arrayof_jint_arraycopy;
#endif
!
! //*** jlong
! // It is always aligned
! StubRoutines::_arrayof_jlong_disjoint_arraycopy = generate_disjoint_long_copy(true, &entry,
"arrayof_jlong_disjoint_arraycopy");
! StubRoutines::_arrayof_jlong_arraycopy = generate_conjoint_long_copy(true, entry, &entry_jlong_arraycopy,
! "arrayof_jlong_arraycopy");
! StubRoutines::_jlong_disjoint_arraycopy = StubRoutines::_arrayof_jlong_disjoint_arraycopy;
! StubRoutines::_jlong_arraycopy = StubRoutines::_arrayof_jlong_arraycopy;
!
! //*** oops
! // Aligned versions
! StubRoutines::_arrayof_oop_disjoint_arraycopy = generate_disjoint_oop_copy(true, &entry,
! "arrayof_oop_disjoint_arraycopy");
! StubRoutines::_arrayof_oop_arraycopy = generate_conjoint_oop_copy(true, entry, &entry_oop_arraycopy,
! "arrayof_oop_arraycopy");
! // Aligned versions without pre-barriers
! StubRoutines::_arrayof_oop_disjoint_arraycopy_no_pre = generate_disjoint_oop_copy(true, &entry,
! "arrayof_oop_disjoint_arraycopy_no_pre", false);
! StubRoutines::_arrayof_oop_arraycopy_no_pre = generate_conjoint_oop_copy(true, entry, NULL,
! "arrayof_oop_arraycopy_no_pre", false);
! #ifdef _LP64
! if (UseCompressedOops) {
! // With compressed oops we need unaligned versions; notice that we overwrite entry_oop_arraycopy.
! StubRoutines::_oop_disjoint_arraycopy = generate_disjoint_oop_copy(false, &entry,
! "oop_disjoint_arraycopy");
! StubRoutines::_oop_arraycopy = generate_conjoint_oop_copy(false, entry, &entry_oop_arraycopy,
! "oop_arraycopy");
! // Unaligned versions without pre-barriers
! StubRoutines::_oop_disjoint_arraycopy_no_pre = generate_disjoint_oop_copy(false, &entry,
! "oop_disjoint_arraycopy_no_pre", false);
! StubRoutines::_oop_arraycopy_no_pre = generate_conjoint_oop_copy(false, entry, NULL,
! "oop_arraycopy_no_pre", false);
! } else
! #endif
! {
! // oop arraycopy is always aligned on 32bit and 64bit without compressed oops
! StubRoutines::_oop_disjoint_arraycopy = StubRoutines::_arrayof_oop_disjoint_arraycopy;
! StubRoutines::_oop_arraycopy = StubRoutines::_arrayof_oop_arraycopy;
! StubRoutines::_oop_disjoint_arraycopy_no_pre = StubRoutines::_arrayof_oop_disjoint_arraycopy_no_pre;
! StubRoutines::_oop_arraycopy_no_pre = StubRoutines::_arrayof_oop_arraycopy_no_pre;
! }
StubRoutines::_checkcast_arraycopy = generate_checkcast_copy("checkcast_arraycopy", &entry_checkcast_arraycopy);
+ StubRoutines::_checkcast_arraycopy_no_pre = generate_checkcast_copy("checkcast_arraycopy_no_pre", NULL, false);
+
StubRoutines::_unsafe_arraycopy = generate_unsafe_copy("unsafe_arraycopy",
entry_jbyte_arraycopy,
entry_jshort_arraycopy,
entry_jint_arraycopy,
entry_jlong_arraycopy);
src/cpu/sparc/vm/stubGenerator_sparc.cpp
Index
Unified diffs
Context diffs
Sdiffs
Patch
New
Old
Previous File
Next File