src/cpu/sparc/vm/stubGenerator_sparc.cpp
Sdiff for change g1-bulk-zeroing-reduction (old version first, then new version)

Old version:

2389       __ ldx(from, 0, O3);
2390       __ stx(O3, to, 0);
2391     __ BIND(L_exit);
2392   }
2393 
2394   //  Generate stub for conjoint long copy.
2395   //  "aligned" is ignored, because we must make the stronger
2396   //  assumption that both addresses are always 64-bit aligned.
2397   //
2398   // Arguments for generated stub:
2399   //      from:  O0
2400   //      to:    O1
2401   //      count: O2 treated as signed
2402   //
2403   address generate_conjoint_long_copy(bool aligned, address nooverlap_target,
2404                                       address *entry, const char *name) {
2405     __ align(CodeEntryAlignment);
2406     StubCodeMark mark(this, "StubRoutines", name);
2407     address start = __ pc();
2408 
2409     assert(!aligned, "usage");
2410 
2411     assert_clean_int(O2, O3);     // Make sure 'count' is clean int.
2412 
2413     if (entry != NULL) {
2414       *entry = __ pc();
2415       // caller can pass a 64-bit byte count here (from Unsafe.copyMemory)
2416       BLOCK_COMMENT("Entry:");
2417     }
2418 
2419     array_overlap_test(nooverlap_target, 3);
2420 
2421     generate_conjoint_long_copy_core(aligned);
2422 
2423     // O3, O4 are used as temp registers
2424     inc_counter_np(SharedRuntime::_jlong_array_copy_ctr, O3, O4);
2425     __ retl();
2426     __ delayed()->mov(G0, O0); // return 0
2427     return start;
2428   }
2429 
2430   //  Generate stub for disjoint oop copy.  If "aligned" is true, the
2431   //  "from" and "to" addresses are assumed to be heapword aligned.
2432   //
2433   // Arguments for generated stub:
2434   //      from:  O0
2435   //      to:    O1
2436   //      count: O2 treated as signed
2437   //
2438   address generate_disjoint_oop_copy(bool aligned, address *entry, const char *name) {

2439 
2440     const Register from  = O0;  // source array address
2441     const Register to    = O1;  // destination array address
2442     const Register count = O2;  // elements count
2443 
2444     __ align(CodeEntryAlignment);
2445     StubCodeMark mark(this, "StubRoutines", name);
2446     address start = __ pc();
2447 
2448     assert_clean_int(count, O3);     // Make sure 'count' is clean int.
2449 
2450     if (entry != NULL) {
2451       *entry = __ pc();
2452       // caller can pass a 64-bit byte count here
2453       BLOCK_COMMENT("Entry:");
2454     }
2455 
2456     // save arguments for barrier generation
2457     __ mov(to, G1);
2458     __ mov(count, G5);

2459     gen_write_ref_array_pre_barrier(G1, G5);

2460   #ifdef _LP64
2461     assert_clean_int(count, O3);     // Make sure 'count' is clean int.
2462     if (UseCompressedOops) {
2463       generate_disjoint_int_copy_core(aligned);
2464     } else {
2465       generate_disjoint_long_copy_core(aligned);
2466     }
2467   #else
2468     generate_disjoint_int_copy_core(aligned);
2469   #endif
2470     // O0 is used as temp register
2471     gen_write_ref_array_post_barrier(G1, G5, O0);
2472 
2473     // O3, O4 are used as temp registers
2474     inc_counter_np(SharedRuntime::_oop_array_copy_ctr, O3, O4);
2475     __ retl();
2476     __ delayed()->mov(G0, O0); // return 0
2477     return start;
2478   }
2479 
2480   //  Generate stub for conjoint oop copy.  If "aligned" is true, the
2481   //  "from" and "to" addresses are assumed to be heapword aligned.
2482   //
2483   // Arguments for generated stub:
2484   //      from:  O0
2485   //      to:    O1
2486   //      count: O2 treated as signed
2487   //
2488   address generate_conjoint_oop_copy(bool aligned, address nooverlap_target,
2489                                      address *entry, const char *name) {

2490 
2491     const Register from  = O0;  // source array address
2492     const Register to    = O1;  // destination array address
2493     const Register count = O2;  // elements count
2494 
2495     __ align(CodeEntryAlignment);
2496     StubCodeMark mark(this, "StubRoutines", name);
2497     address start = __ pc();
2498 
2499     assert_clean_int(count, O3);     // Make sure 'count' is clean int.
2500 
2501     if (entry != NULL) {
2502       *entry = __ pc();
2503       // caller can pass a 64-bit byte count here
2504       BLOCK_COMMENT("Entry:");
2505     }
2506 
2507     array_overlap_test(nooverlap_target, LogBytesPerHeapOop);
2508 
2509     // save arguments for barrier generation
2510     __ mov(to, G1);
2511     __ mov(count, G5);

2512     gen_write_ref_array_pre_barrier(G1, G5);

2513 
2514   #ifdef _LP64
2515     if (UseCompressedOops) {
2516       generate_conjoint_int_copy_core(aligned);
2517     } else {
2518       generate_conjoint_long_copy_core(aligned);
2519     }
2520   #else
2521     generate_conjoint_int_copy_core(aligned);
2522   #endif
2523 
2524     // O0 is used as temp register
2525     gen_write_ref_array_post_barrier(G1, G5, O0);
2526 
2527     // O3, O4 are used as temp registers
2528     inc_counter_np(SharedRuntime::_oop_array_copy_ctr, O3, O4);
2529     __ retl();
2530     __ delayed()->mov(G0, O0); // return 0
2531     return start;
2532   }


2561     __ delayed()->restore();
2562 
2563     __ bind(L_pop_to_miss);
2564     __ restore();
2565 
2566     // Fall through on failure!
2567     __ BIND(L_miss);
2568   }
2569 
2570 
2571   //  Generate stub for checked oop copy.
2572   //
2573   // Arguments for generated stub:
2574   //      from:  O0
2575   //      to:    O1
2576   //      count: O2 treated as signed
2577   //      ckoff: O3 (super_check_offset)
2578   //      ckval: O4 (super_klass)
2579   //      ret:   O0 zero for success; (-1^K) where K is partial transfer count
2580   //
2581   address generate_checkcast_copy(const char *name, address *entry) {
2582 
2583     const Register O0_from   = O0;      // source array address
2584     const Register O1_to     = O1;      // destination array address
2585     const Register O2_count  = O2;      // elements count
2586     const Register O3_ckoff  = O3;      // super_check_offset
2587     const Register O4_ckval  = O4;      // super_klass
2588 
2589     const Register O5_offset = O5;      // loop var, with stride wordSize
2590     const Register G1_remain = G1;      // loop var, with stride -1
2591     const Register G3_oop    = G3;      // actual oop copied
2592     const Register G4_klass  = G4;      // oop._klass
2593     const Register G5_super  = G5;      // oop._klass._primary_supers[ckval]
2594 
2595     __ align(CodeEntryAlignment);
2596     StubCodeMark mark(this, "StubRoutines", name);
2597     address start = __ pc();
2598 
2599 #ifdef ASSERT
2600     // We sometimes save a frame (see generate_type_check below).
2601     // If this will cause trouble, let's fail now instead of later.


3066 
3067       __ br(Assembler::always, false, Assembler::pt, entry_checkcast_arraycopy);
3068       __ delayed()->lduw(O4, sco_offset, O3);
3069     }
3070 
3071   __ BIND(L_failed);
3072     __ retl();
3073     __ delayed()->sub(G0, 1, O0); // return -1
3074     return start;
3075   }
3076 
3077   void generate_arraycopy_stubs() {
3078     address entry;
3079     address entry_jbyte_arraycopy;
3080     address entry_jshort_arraycopy;
3081     address entry_jint_arraycopy;
3082     address entry_oop_arraycopy;
3083     address entry_jlong_arraycopy;
3084     address entry_checkcast_arraycopy;
3085 


3086     StubRoutines::_jbyte_disjoint_arraycopy  = generate_disjoint_byte_copy(false, &entry,
3087                                                                            "jbyte_disjoint_arraycopy");
3088     StubRoutines::_jbyte_arraycopy           = generate_conjoint_byte_copy(false, entry, &entry_jbyte_arraycopy,

3089                                                                            "jbyte_arraycopy");
3090     StubRoutines::_jshort_disjoint_arraycopy = generate_disjoint_short_copy(false, &entry,
3091                                                                             "jshort_disjoint_arraycopy");
3092     StubRoutines::_jshort_arraycopy          = generate_conjoint_short_copy(false, entry, &entry_jshort_arraycopy,
3093                                                                             "jshort_arraycopy");
3094     StubRoutines::_jint_disjoint_arraycopy   = generate_disjoint_int_copy(false, &entry,
3095                                                                           "jint_disjoint_arraycopy");
3096     StubRoutines::_jint_arraycopy            = generate_conjoint_int_copy(false, entry, &entry_jint_arraycopy,
3097                                                                           "jint_arraycopy");
3098     StubRoutines::_jlong_disjoint_arraycopy  = generate_disjoint_long_copy(false, &entry,
3099                                                                            "jlong_disjoint_arraycopy");
3100     StubRoutines::_jlong_arraycopy           = generate_conjoint_long_copy(false, entry, &entry_jlong_arraycopy,
3101                                                                            "jlong_arraycopy");
3102     StubRoutines::_oop_disjoint_arraycopy    = generate_disjoint_oop_copy(false, &entry,
3103                                                                           "oop_disjoint_arraycopy");
3104     StubRoutines::_oop_arraycopy             = generate_conjoint_oop_copy(false, entry, &entry_oop_arraycopy,
3105                                                                           "oop_arraycopy");
3106 
3107 
3108     StubRoutines::_arrayof_jbyte_disjoint_arraycopy  = generate_disjoint_byte_copy(true, &entry,
3109                                                                                    "arrayof_jbyte_disjoint_arraycopy");
3110     StubRoutines::_arrayof_jbyte_arraycopy           = generate_conjoint_byte_copy(true, entry, NULL,
3111                                                                                    "arrayof_jbyte_arraycopy");
3112 
3113     StubRoutines::_arrayof_jshort_disjoint_arraycopy = generate_disjoint_short_copy(true, &entry,
3114                                                                                     "arrayof_jshort_disjoint_arraycopy");
3115     StubRoutines::_arrayof_jshort_arraycopy          = generate_conjoint_short_copy(true, entry, NULL,
3116                                                                                     "arrayof_jshort_arraycopy");
3117 


3118     StubRoutines::_arrayof_jint_disjoint_arraycopy   = generate_disjoint_int_copy(true, &entry,
3119                                                                                   "arrayof_jint_disjoint_arraycopy");


3120 #ifdef _LP64
3121     // since sizeof(jint) < sizeof(HeapWord), there's a different flavor:
3122     StubRoutines::_arrayof_jint_arraycopy     = generate_conjoint_int_copy(true, entry, NULL, "arrayof_jint_arraycopy");
3123   #else
3124     StubRoutines::_arrayof_jint_arraycopy     = StubRoutines::_jint_arraycopy;
3125 #endif
3126 
3127     StubRoutines::_arrayof_jlong_disjoint_arraycopy  = generate_disjoint_long_copy(true, NULL,



3128                                                                                    "arrayof_jlong_disjoint_arraycopy");
3129     StubRoutines::_arrayof_oop_disjoint_arraycopy    =  generate_disjoint_oop_copy(true, NULL,
3130                                                                                    "arrayof_oop_disjoint_arraycopy");



3131 
3132     StubRoutines::_arrayof_jlong_arraycopy    = StubRoutines::_jlong_arraycopy;
3133     StubRoutines::_arrayof_oop_arraycopy      = StubRoutines::_oop_arraycopy;
3134 
3135     StubRoutines::_checkcast_arraycopy = generate_checkcast_copy("checkcast_arraycopy", &entry_checkcast_arraycopy);


3136     StubRoutines::_unsafe_arraycopy    = generate_unsafe_copy("unsafe_arraycopy",
3137                                                               entry_jbyte_arraycopy,
3138                                                               entry_jshort_arraycopy,
3139                                                               entry_jint_arraycopy,
3140                                                               entry_jlong_arraycopy);
3141     StubRoutines::_generic_arraycopy   = generate_generic_copy("generic_arraycopy",
3142                                                                entry_jbyte_arraycopy,
3143                                                                entry_jshort_arraycopy,
3144                                                                entry_jint_arraycopy,
3145                                                                entry_oop_arraycopy,
3146                                                                entry_jlong_arraycopy,
3147                                                                entry_checkcast_arraycopy);
3148 
3149     StubRoutines::_jbyte_fill = generate_fill(T_BYTE, false, "jbyte_fill");
3150     StubRoutines::_jshort_fill = generate_fill(T_SHORT, false, "jshort_fill");
3151     StubRoutines::_jint_fill = generate_fill(T_INT, false, "jint_fill");
3152     StubRoutines::_arrayof_jbyte_fill = generate_fill(T_BYTE, true, "arrayof_jbyte_fill");
3153     StubRoutines::_arrayof_jshort_fill = generate_fill(T_SHORT, true, "arrayof_jshort_fill");
3154     StubRoutines::_arrayof_jint_fill = generate_fill(T_INT, true, "arrayof_jint_fill");
3155   }

New version:

2389       __ ldx(from, 0, O3);
2390       __ stx(O3, to, 0);
2391     __ BIND(L_exit);
2392   }
2393 
2394   //  Generate stub for conjoint long copy.
2395   //  "aligned" is ignored, because we must make the stronger
2396   //  assumption that both addresses are always 64-bit aligned.
2397   //
2398   // Arguments for generated stub:
2399   //      from:  O0
2400   //      to:    O1
2401   //      count: O2 treated as signed
2402   //
2403   address generate_conjoint_long_copy(bool aligned, address nooverlap_target,
2404                                       address *entry, const char *name) {
2405     __ align(CodeEntryAlignment);
2406     StubCodeMark mark(this, "StubRoutines", name);
2407     address start = __ pc();
2408 
2409     assert(aligned, "Should always be aligned");
2410 
2411     assert_clean_int(O2, O3);     // Make sure 'count' is clean int.
2412 
2413     if (entry != NULL) {
2414       *entry = __ pc();
2415       // caller can pass a 64-bit byte count here (from Unsafe.copyMemory)
2416       BLOCK_COMMENT("Entry:");
2417     }
2418 
2419     array_overlap_test(nooverlap_target, 3);
2420 
2421     generate_conjoint_long_copy_core(aligned);
2422 
2423     // O3, O4 are used as temp registers
2424     inc_counter_np(SharedRuntime::_jlong_array_copy_ctr, O3, O4);
2425     __ retl();
2426     __ delayed()->mov(G0, O0); // return 0
2427     return start;
2428   }
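A side note on the dispatch just above: array_overlap_test(nooverlap_target, 3) branches to the disjoint (forward-copying) stub whenever the destination does not overlap the source in the dangerous direction; the 3 is the element shift for 8-byte jlongs. A minimal standalone C++ sketch of that decision, with nothing HotSpot-specific assumed:

    #include <cstdint>
    #include <cstring>

    // Model of the overlap test: one unsigned compare of (to - from) against
    // the byte count also covers to < from, because the subtraction wraps
    // around to a huge unsigned value, so a forward copy is safe then too.
    static void conjoint_copy(const void* from, void* to, size_t count, int log2_size) {
      uintptr_t byte_count = (uintptr_t)count << log2_size;   // log2_size == 3 for jlong
      if ((uintptr_t)to - (uintptr_t)from >= byte_count) {
        memcpy(to, from, byte_count);    // no dangerous overlap: disjoint path
      } else {
        memmove(to, from, byte_count);   // overlapping: copies backwards
      }
    }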
2429 
2430   //  Generate stub for disjoint oop copy.  If "aligned" is true, the
2431   //  "from" and "to" addresses are assumed to be heapword aligned.
2432   //
2433   // Arguments for generated stub:
2434   //      from:  O0
2435   //      to:    O1
2436   //      count: O2 treated as signed
2437   //
2438   address generate_disjoint_oop_copy(bool aligned, address *entry, const char *name,
2439                                      bool need_pre_barrier = true) {
2440 
2441     const Register from  = O0;  // source array address
2442     const Register to    = O1;  // destination array address
2443     const Register count = O2;  // elements count
2444 
2445     __ align(CodeEntryAlignment);
2446     StubCodeMark mark(this, "StubRoutines", name);
2447     address start = __ pc();
2448 
2449     assert_clean_int(count, O3);     // Make sure 'count' is clean int.
2450 
2451     if (entry != NULL) {
2452       *entry = __ pc();
2453       // caller can pass a 64-bit byte count here
2454       BLOCK_COMMENT("Entry:");
2455     }
2456 
2457     // save arguments for barrier generation
2458     __ mov(to, G1);
2459     __ mov(count, G5);
2460     if (need_pre_barrier) {
2461       gen_write_ref_array_pre_barrier(G1, G5);
2462     }
2463   #ifdef _LP64
2464     assert_clean_int(count, O3);     // Make sure 'count' is clean int.
2465     if (UseCompressedOops) {
2466       generate_disjoint_int_copy_core(aligned);
2467     } else {
2468       generate_disjoint_long_copy_core(aligned);
2469     }
2470   #else
2471     generate_disjoint_int_copy_core(aligned);
2472   #endif
2473     // O0 is used as temp register
2474     gen_write_ref_array_post_barrier(G1, G5, O0);
2475 
2476     // O3, O4 are used as temp registers
2477     inc_counter_np(SharedRuntime::_oop_array_copy_ctr, O3, O4);
2478     __ retl();
2479     __ delayed()->mov(G0, O0); // return 0
2480     return start;
2481   }
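Why the new need_pre_barrier flag is sound: G1's SATB pre-barrier enqueues the oops about to be overwritten so the concurrent marker cannot lose them. If the destination array was only just allocated, its slots hold nothing the marker could need, and that whole pass can be skipped; the card-dirtying post-barrier must still run because new references are being stored. A rough standalone model of what the generated stub does around the copy core, using hypothetical helper names:

    #include <cstddef>

    typedef void* oop;   // illustrative stand-in for a heap reference

    static void satb_enqueue(oop o)                  { /* model: record old value */ }
    static void dirty_cards(oop* base, size_t count) { /* model: post-barrier */ }

    void oop_disjoint_copy(oop* from, oop* to, size_t count, bool need_pre_barrier) {
      if (need_pre_barrier) {                // skipped by the *_no_pre stubs below
        for (size_t i = 0; i < count; i++)
          satb_enqueue(to[i]);               // snapshot the values being overwritten
      }
      for (size_t i = 0; i < count; i++)
        to[i] = from[i];                     // the copy core (int or long flavor)
      dirty_cards(to, count);                // always: the dest now holds new refs
    }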
2482 
2483   //  Generate stub for conjoint oop copy.  If "aligned" is true, the
2484   //  "from" and "to" addresses are assumed to be heapword aligned.
2485   //
2486   // Arguments for generated stub:
2487   //      from:  O0
2488   //      to:    O1
2489   //      count: O2 treated as signed
2490   //
2491   address generate_conjoint_oop_copy(bool aligned, address nooverlap_target,
2492                                      address *entry, const char *name,
2493                                      bool need_pre_barrier = true) {
2494 
2495     const Register from  = O0;  // source array address
2496     const Register to    = O1;  // destination array address
2497     const Register count = O2;  // elements count
2498 
2499     __ align(CodeEntryAlignment);
2500     StubCodeMark mark(this, "StubRoutines", name);
2501     address start = __ pc();
2502 
2503     assert_clean_int(count, O3);     // Make sure 'count' is clean int.
2504 
2505     if (entry != NULL) {
2506       *entry = __ pc();
2507       // caller can pass a 64-bit byte count here
2508       BLOCK_COMMENT("Entry:");
2509     }
2510 
2511     array_overlap_test(nooverlap_target, LogBytesPerHeapOop);
2512 
2513     // save arguments for barrier generation
2514     __ mov(to, G1);
2515     __ mov(count, G5);
2516     if (need_pre_barrier) {
2517       gen_write_ref_array_pre_barrier(G1, G5);
2518     }
2519 
2520   #ifdef _LP64
2521     if (UseCompressedOops) {
2522       generate_conjoint_int_copy_core(aligned);
2523     } else {
2524       generate_conjoint_long_copy_core(aligned);
2525     }
2526   #else
2527     generate_conjoint_int_copy_core(aligned);
2528   #endif
2529 
2530     // O0 is used as temp register
2531     gen_write_ref_array_post_barrier(G1, G5, O0);
2532 
2533     // O3, O4 are used as temp registers
2534     inc_counter_np(SharedRuntime::_oop_array_copy_ctr, O3, O4);
2535     __ retl();
2536     __ delayed()->mov(G0, O0); // return 0
2537     return start;
2538   }
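The #ifdef _LP64 / UseCompressedOops dispatch in both oop stubs comes down to element width: with compressed oops a heap reference is a 32-bit narrowOop, so the copy is bit-for-bit an int copy, while an uncompressed 64-bit oop needs the long copy core. A sketch under that assumption (the types are illustrative, not HotSpot's):

    #include <cstdint>
    #include <cstring>

    typedef uint32_t narrowOop;   // heap reference under -XX:+UseCompressedOops

    void copy_oop_payload(const void* from, void* to, size_t count, bool compressed) {
      size_t elem = compressed ? sizeof(narrowOop) : sizeof(uint64_t);
      memmove(to, from, count * elem);   // stands in for the int/long copy cores
    }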


2567     __ delayed()->restore();
2568 
2569     __ bind(L_pop_to_miss);
2570     __ restore();
2571 
2572     // Fall through on failure!
2573     __ BIND(L_miss);
2574   }
2575 
2576 
2577   //  Generate stub for checked oop copy.
2578   //
2579   // Arguments for generated stub:
2580   //      from:  O0
2581   //      to:    O1
2582   //      count: O2 treated as signed
2583   //      ckoff: O3 (super_check_offset)
2584   //      ckval: O4 (super_klass)
2585   //      ret:   O0 zero for success; (-1^K) where K is partial transfer count
2586   //
2587   address generate_checkcast_copy(const char *name, address *entry, bool need_pre_barrier = true) {
2588 
2589     const Register O0_from   = O0;      // source array address
2590     const Register O1_to     = O1;      // destination array address
2591     const Register O2_count  = O2;      // elements count
2592     const Register O3_ckoff  = O3;      // super_check_offset
2593     const Register O4_ckval  = O4;      // super_klass
2594 
2595     const Register O5_offset = O5;      // loop var, with stride wordSize
2596     const Register G1_remain = G1;      // loop var, with stride -1
2597     const Register G3_oop    = G3;      // actual oop copied
2598     const Register G4_klass  = G4;      // oop._klass
2599     const Register G5_super  = G5;      // oop._klass._primary_supers[ckval]
2600 
2601     __ align(CodeEntryAlignment);
2602     StubCodeMark mark(this, "StubRoutines", name);
2603     address start = __ pc();
2604 
2605 #ifdef ASSERT
2606     // We sometimes save a frame (see generate_type_check below).
2607     // If this will cause trouble, let's fail now instead of later.


3072 
3073       __ br(Assembler::always, false, Assembler::pt, entry_checkcast_arraycopy);
3074       __ delayed()->lduw(O4, sco_offset, O3);
3075     }
3076 
3077   __ BIND(L_failed);
3078     __ retl();
3079     __ delayed()->sub(G0, 1, O0); // return -1
3080     return start;
3081   }
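Two return conventions recur in these stub tails. First, the SPARC delay-slot idiom: the instruction passed to delayed() executes in the slot of the retl, so setting the return value costs no extra instruction; mov(G0, O0) yields 0 and sub(G0, 1, O0) yields -1. Second, the checkcast stub's documented encoding, zero for success or (-1^K) for a partial transfer of K elements, is just the bitwise NOT of K. A caller-side decode, as a sketch rather than HotSpot source:

    #include <cstdint>
    #include <cstddef>

    // ret == 0             -> every element was copied
    // ret == -1 ^ K == ~K  -> K elements were copied before a type check failed
    size_t elements_copied(intptr_t ret, size_t total) {
      return (ret == 0) ? total : (size_t)~ret;   // e.g. ~(intptr_t)-4 == 3
    }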
3082 
3083   void generate_arraycopy_stubs() {
3084     address entry;
3085     address entry_jbyte_arraycopy;
3086     address entry_jshort_arraycopy;
3087     address entry_jint_arraycopy;
3088     address entry_oop_arraycopy;
3089     address entry_jlong_arraycopy;
3090     address entry_checkcast_arraycopy;
3091 
3092     //*** jbyte
3093     // Always need aligned and unaligned versions
3094     StubRoutines::_jbyte_disjoint_arraycopy         = generate_disjoint_byte_copy(false, &entry,
3095                                                                                   "jbyte_disjoint_arraycopy");
3096     StubRoutines::_jbyte_arraycopy                  = generate_conjoint_byte_copy(false, entry,
3097                                                                                   &entry_jbyte_arraycopy,
3098                                                                                   "jbyte_arraycopy");
3099     StubRoutines::_arrayof_jbyte_disjoint_arraycopy = generate_disjoint_byte_copy(true, &entry,
3100                                                                                   "arrayof_jbyte_disjoint_arraycopy");
3101     StubRoutines::_arrayof_jbyte_arraycopy          = generate_conjoint_byte_copy(true, entry, NULL,
3102                                                                                   "arrayof_jbyte_arraycopy");
3103 
3104     //*** jshort
3105     // Always need aligned and unaligned versions
3106     StubRoutines::_jshort_disjoint_arraycopy         = generate_disjoint_short_copy(false, &entry,
3107                                                                                     "jshort_disjoint_arraycopy");
3108     StubRoutines::_jshort_arraycopy                  = generate_conjoint_short_copy(false, entry,
3109                                                                                     &entry_jshort_arraycopy,
3110                                                                                     "jshort_arraycopy");
3111     StubRoutines::_arrayof_jshort_disjoint_arraycopy = generate_disjoint_short_copy(true, &entry,
3112                                                                                     "arrayof_jshort_disjoint_arraycopy");
3113     StubRoutines::_arrayof_jshort_arraycopy          = generate_conjoint_short_copy(true, entry, NULL,
3114                                                                                     "arrayof_jshort_arraycopy");
3115 
3116     //*** jint
3117     // Aligned versions
3118     StubRoutines::_arrayof_jint_disjoint_arraycopy = generate_disjoint_int_copy(true, &entry,
3119                                                                                 "arrayof_jint_disjoint_arraycopy");
3120     StubRoutines::_arrayof_jint_arraycopy          = generate_conjoint_int_copy(true, entry, &entry_jint_arraycopy,
3121                                                                                 "arrayof_jint_arraycopy");
3122 #ifdef _LP64
3123     // In 64-bit we need both aligned and unaligned versions of jint arraycopy.
3124     // entry_jint_arraycopy always points to the unaligned version (notice that we overwrite it).
3125     StubRoutines::_jint_disjoint_arraycopy         = generate_disjoint_int_copy(false, &entry,
3126                                                                                 "jint_disjoint_arraycopy");
3127     StubRoutines::_jint_arraycopy                  = generate_conjoint_int_copy(false, entry,
3128                                                                                 &entry_jint_arraycopy,
3129                                                                                 "jint_arraycopy");
3130 #else
3131     // In 32-bit, jints are always HeapWordSize aligned, so always use the aligned version
3132     // (in fact in 32-bit we always have a pre-loop part even in the aligned version,
3133     //  because it uses 64-bit loads/stores, so the aligned flag is actually ignored).
3134     StubRoutines::_jint_disjoint_arraycopy = StubRoutines::_arrayof_jint_disjoint_arraycopy;
3135     StubRoutines::_jint_arraycopy          = StubRoutines::_arrayof_jint_arraycopy;
3136 #endif
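Spelling out why jint needs both flavors on LP64: sizeof(jint) == 4 but HeapWordSize == 8, so an element-aligned jint destination can start on an odd 4-byte boundary and the 8-bytes-per-iteration core must first peel one element. A simplified illustration, where memcpy stands in for the real core (which additionally handles a still-misaligned source with shifted loads):

    #include <cstdint>
    #include <cstring>

    void jint_disjoint_copy(const int32_t* from, int32_t* to, size_t count) {
      if (count > 0 && ((uintptr_t)to & 7) != 0) {   // not heapword (8-byte) aligned
        *to++ = *from++;                             // peel one leading jint
        count--;
      }
      memcpy(to, from, count * sizeof(int32_t));     // wide copy core from here on
    }

The arrayof_ flavors may assume heapword alignment up front and skip the peel.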
3137 
3138 
3139     //*** jlong
3140     // It is always aligned
3141     StubRoutines::_arrayof_jlong_disjoint_arraycopy = generate_disjoint_long_copy(true, &entry,
3142                                                                                   "arrayof_jlong_disjoint_arraycopy");
3143     StubRoutines::_arrayof_jlong_arraycopy          = generate_conjoint_long_copy(true, entry, &entry_jlong_arraycopy,
3144                                                                                   "arrayof_jlong_arraycopy");
3145     StubRoutines::_jlong_disjoint_arraycopy         = StubRoutines::_arrayof_jlong_disjoint_arraycopy;
3146     StubRoutines::_jlong_arraycopy                  = StubRoutines::_arrayof_jlong_arraycopy;
3147 
3148 
3149     //*** oops
3150     // Aligned versions
3151     StubRoutines::_arrayof_oop_disjoint_arraycopy        = generate_disjoint_oop_copy(true, &entry,
3152                                                                                       "arrayof_oop_disjoint_arraycopy");
3153     StubRoutines::_arrayof_oop_arraycopy                 = generate_conjoint_oop_copy(true, entry, &entry_oop_arraycopy,
3154                                                                                       "arrayof_oop_arraycopy");
3155     // Aligned versions without pre-barriers
3156     StubRoutines::_arrayof_oop_disjoint_arraycopy_no_pre = generate_disjoint_oop_copy(true, &entry,
3157                                                                                       "arrayof_oop_disjoint_arraycopy_no_pre", false);
3158     StubRoutines::_arrayof_oop_arraycopy_no_pre          = generate_conjoint_oop_copy(true, entry, NULL,
3159                                                                                       "arrayof_oop_arraycopy_no_pre", false);
3160 #ifdef _LP64
3161     if (UseCompressedOops) {
3162       // With compressed oops we need unaligned versions; notice that we overwrite entry_oop_arraycopy.
3163       StubRoutines::_oop_disjoint_arraycopy            = generate_disjoint_oop_copy(false, &entry,
3164                                                                                     "oop_disjoint_arraycopy");
3165       StubRoutines::_oop_arraycopy                     = generate_conjoint_oop_copy(false, entry, &entry_oop_arraycopy,
3166                                                                                     "oop_arraycopy");
3167       // Unaligned versions without pre-barriers
3168       StubRoutines::_oop_disjoint_arraycopy_no_pre     = generate_disjoint_oop_copy(false, &entry,
3169                                                                                     "oop_disjoint_arraycopy_no_pre", false);
3170       StubRoutines::_oop_arraycopy_no_pre              = generate_conjoint_oop_copy(false, entry, NULL,
3171                                                                                     "oop_arraycopy_no_pre", false);
3172     } else
3173 #endif
3174     {
3175       // oop arraycopy is always aligned on 32-bit, and on 64-bit without compressed oops
3176       StubRoutines::_oop_disjoint_arraycopy            = StubRoutines::_arrayof_oop_disjoint_arraycopy;
3177       StubRoutines::_oop_arraycopy                     = StubRoutines::_arrayof_oop_arraycopy;
3178       StubRoutines::_oop_disjoint_arraycopy_no_pre     = StubRoutines::_arrayof_oop_disjoint_arraycopy_no_pre;
3179       StubRoutines::_oop_arraycopy_no_pre              = StubRoutines::_arrayof_oop_arraycopy_no_pre;
3180     }
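How a caller chooses between the plain and _no_pre oop entry points is not shown in this file. Presumably (an assumption about the surrounding change, not code from this webrev) the compiler picks a _no_pre stub only when it can prove the destination was freshly allocated and never published to the collector:

    // Hypothetical caller-side selection; dest_uninitialized is assumed to be
    // proven by the compiler for just-allocated, not-yet-published arrays.
    address select_oop_arraycopy(bool dest_uninitialized) {
      return dest_uninitialized ? StubRoutines::_oop_arraycopy_no_pre
                                : StubRoutines::_oop_arraycopy;
    }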
3181 
3182     StubRoutines::_checkcast_arraycopy        = generate_checkcast_copy("checkcast_arraycopy", &entry_checkcast_arraycopy);
3183     StubRoutines::_checkcast_arraycopy_no_pre = generate_checkcast_copy("checkcast_arraycopy_no_pre", NULL, false);
3184 
3185     StubRoutines::_unsafe_arraycopy    = generate_unsafe_copy("unsafe_arraycopy",
3186                                                               entry_jbyte_arraycopy,
3187                                                               entry_jshort_arraycopy,
3188                                                               entry_jint_arraycopy,
3189                                                               entry_jlong_arraycopy);
3190     StubRoutines::_generic_arraycopy   = generate_generic_copy("generic_arraycopy",
3191                                                                entry_jbyte_arraycopy,
3192                                                                entry_jshort_arraycopy,
3193                                                                entry_jint_arraycopy,
3194                                                                entry_oop_arraycopy,
3195                                                                entry_jlong_arraycopy,
3196                                                                entry_checkcast_arraycopy);
3197 
3198     StubRoutines::_jbyte_fill = generate_fill(T_BYTE, false, "jbyte_fill");
3199     StubRoutines::_jshort_fill = generate_fill(T_SHORT, false, "jshort_fill");
3200     StubRoutines::_jint_fill = generate_fill(T_INT, false, "jint_fill");
3201     StubRoutines::_arrayof_jbyte_fill = generate_fill(T_BYTE, true, "arrayof_jbyte_fill");
3202     StubRoutines::_arrayof_jshort_fill = generate_fill(T_SHORT, true, "arrayof_jshort_fill");
3203     StubRoutines::_arrayof_jint_fill = generate_fill(T_INT, true, "arrayof_jint_fill");
3204   }
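The fill stubs registered at the end replace element-by-element stores with wide, value-replicated stores. Only their observable contract is modeled below; the generated SPARC code widens the value into a register and stores eight or more bytes per iteration:

    #include <cstdint>
    #include <cstddef>

    void jint_fill(int32_t* to, int32_t value, size_t count) {
      for (size_t i = 0; i < count; i++)
        to[i] = value;   // T_BYTE/T_SHORT stubs replicate value across a word first
    }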

