          --- old/src/cpu/sparc/vm/stubGenerator_sparc.cpp
          +++ new/src/cpu/sparc/vm/stubGenerator_sparc.cpp
... 2398 lines elided ...
2399 2399    //      from:  O0
2400 2400    //      to:    O1
2401 2401    //      count: O2 treated as signed
2402 2402    //
2403 2403    address generate_conjoint_long_copy(bool aligned, address nooverlap_target,
2404 2404                                        address *entry, const char *name) {
2405 2405      __ align(CodeEntryAlignment);
2406 2406      StubCodeMark mark(this, "StubRoutines", name);
2407 2407      address start = __ pc();
2408 2408  
2409      -    assert(!aligned, "usage");
     2409 +    assert(aligned, "Should always be aligned");
2410 2410  
2411 2411      assert_clean_int(O2, O3);     // Make sure 'count' is clean int.
2412 2412  
2413 2413      if (entry != NULL) {
2414 2414        *entry = __ pc();
2415 2415        // caller can pass a 64-bit byte count here (from Unsafe.copyMemory)
2416 2416        BLOCK_COMMENT("Entry:");
2417 2417      }
2418 2418  
2419 2419      array_overlap_test(nooverlap_target, 3);
... 8 lines elided ...
2428 2428    }
2429 2429  
2430 2430    //  Generate stub for disjoint oop copy.  If "aligned" is true, the
2431 2431    //  "from" and "to" addresses are assumed to be heapword aligned.
2432 2432    //
2433 2433    // Arguments for generated stub:
2434 2434    //      from:  O0
2435 2435    //      to:    O1
2436 2436    //      count: O2 treated as signed
2437 2437    //
2438      -  address generate_disjoint_oop_copy(bool aligned, address *entry, const char *name) {
     2438 +  address generate_disjoint_oop_copy(bool aligned, address *entry, const char *name,
     2439 +                                     bool need_pre_barrier = true) {
2439 2440  
2440 2441      const Register from  = O0;  // source array address
2441 2442      const Register to    = O1;  // destination array address
2442 2443      const Register count = O2;  // elements count
2443 2444  
2444 2445      __ align(CodeEntryAlignment);
2445 2446      StubCodeMark mark(this, "StubRoutines", name);
2446 2447      address start = __ pc();
2447 2448  
2448 2449      assert_clean_int(count, O3);     // Make sure 'count' is clean int.
2449 2450  
2450 2451      if (entry != NULL) {
2451 2452        *entry = __ pc();
2452 2453        // caller can pass a 64-bit byte count here
2453 2454        BLOCK_COMMENT("Entry:");
2454 2455      }
2455 2456  
2456 2457      // save arguments for barrier generation
2457 2458      __ mov(to, G1);
2458 2459      __ mov(count, G5);
2459      -    gen_write_ref_array_pre_barrier(G1, G5);
     2460 +    if (need_pre_barrier) {
     2461 +      gen_write_ref_array_pre_barrier(G1, G5);
     2462 +    }
2460 2463    #ifdef _LP64
2461 2464      assert_clean_int(count, O3);     // Make sure 'count' is clean int.
2462 2465      if (UseCompressedOops) {
2463 2466        generate_disjoint_int_copy_core(aligned);
2464 2467      } else {
2465 2468        generate_disjoint_long_copy_core(aligned);
2466 2469      }
2467 2470    #else
2468 2471      generate_disjoint_int_copy_core(aligned);
2469 2472    #endif
... 9 lines elided ...
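(Editorial note: a minimal standalone sketch, not HotSpot code, of the element-width dispatch generate_disjoint_oop_copy uses above. With UseCompressedOops each heap reference is a 32-bit narrowOop, so the int copy core is reused; otherwise references are full 64-bit words and the long copy core is used. Every identifier below except UseCompressedOops and _LP64 is invented for illustration.)

    #include <cstddef>
    #include <cstdint>
    #include <cstring>

    // Illustrative stand-in for the HotSpot flag of the same name.
    static bool UseCompressedOops = true;

    // Toy disjoint oop copy: choose the element width the same way the stub
    // generator chooses between the int and long copy cores.
    static void toy_disjoint_oop_copy(const void* from, void* to, std::size_t count) {
    #ifdef _LP64
      const std::size_t elem = UseCompressedOops ? sizeof(std::uint32_t)   // narrow oops
                                                 : sizeof(std::uint64_t);  // full-width oops
    #else
      const std::size_t elem = sizeof(std::uint32_t);                      // 32-bit VM
    #endif
      std::memcpy(to, from, count * elem);  // regions are disjoint, so memcpy is safe
    }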
2479 2482  
2480 2483    //  Generate stub for conjoint oop copy.  If "aligned" is true, the
2481 2484    //  "from" and "to" addresses are assumed to be heapword aligned.
2482 2485    //
2483 2486    // Arguments for generated stub:
2484 2487    //      from:  O0
2485 2488    //      to:    O1
2486 2489    //      count: O2 treated as signed
2487 2490    //
2488 2491    address generate_conjoint_oop_copy(bool aligned, address nooverlap_target,
2489      -                                     address *entry, const char *name) {
     2492 +                                     address *entry, const char *name,
     2493 +                                     bool need_pre_barrier = true) {
2490 2494  
2491 2495      const Register from  = O0;  // source array address
2492 2496      const Register to    = O1;  // destination array address
2493 2497      const Register count = O2;  // elements count
2494 2498  
2495 2499      __ align(CodeEntryAlignment);
2496 2500      StubCodeMark mark(this, "StubRoutines", name);
2497 2501      address start = __ pc();
2498 2502  
2499 2503      assert_clean_int(count, O3);     // Make sure 'count' is clean int.
... 2 lines elided ...
2502 2506        *entry = __ pc();
2503 2507        // caller can pass a 64-bit byte count here
2504 2508        BLOCK_COMMENT("Entry:");
2505 2509      }
2506 2510  
2507 2511      array_overlap_test(nooverlap_target, LogBytesPerHeapOop);
2508 2512  
2509 2513      // save arguments for barrier generation
2510 2514      __ mov(to, G1);
2511 2515      __ mov(count, G5);
2512      -    gen_write_ref_array_pre_barrier(G1, G5);
     2516 +    if (need_pre_barrier) {
     2517 +      gen_write_ref_array_pre_barrier(G1, G5);
     2518 +    }
2513 2519  
2514 2520    #ifdef _LP64
2515 2521      if (UseCompressedOops) {
2516 2522        generate_conjoint_int_copy_core(aligned);
2517 2523      } else {
2518 2524        generate_conjoint_long_copy_core(aligned);
2519 2525      }
2520 2526    #else
2521 2527      generate_conjoint_int_copy_core(aligned);
2522 2528    #endif
... 48 lines elided ...
2571 2577    //  Generate stub for checked oop copy.
2572 2578    //
2573 2579    // Arguments for generated stub:
2574 2580    //      from:  O0
2575 2581    //      to:    O1
2576 2582    //      count: O2 treated as signed
2577 2583    //      ckoff: O3 (super_check_offset)
2578 2584    //      ckval: O4 (super_klass)
2579 2585    //      ret:   O0 zero for success; (-1^K) where K is partial transfer count
2580 2586    //
2581      -  address generate_checkcast_copy(const char *name, address *entry) {
     2587 +  address generate_checkcast_copy(const char *name, address *entry, bool need_pre_barrier = true) {
2582 2588  
2583 2589      const Register O0_from   = O0;      // source array address
2584 2590      const Register O1_to     = O1;      // destination array address
2585 2591      const Register O2_count  = O2;      // elements count
2586 2592      const Register O3_ckoff  = O3;      // super_check_offset
2587 2593      const Register O4_ckval  = O4;      // super_klass
2588 2594  
2589 2595      const Register O5_offset = O5;      // loop var, with stride wordSize
2590 2596      const Register G1_remain = G1;      // loop var, with stride -1
2591 2597      const Register G3_oop    = G3;      // actual oop copied
... 484 lines elided ...
3076 3082  
3077 3083    void generate_arraycopy_stubs() {
3078 3084      address entry;
3079 3085      address entry_jbyte_arraycopy;
3080 3086      address entry_jshort_arraycopy;
3081 3087      address entry_jint_arraycopy;
3082 3088      address entry_oop_arraycopy;
3083 3089      address entry_jlong_arraycopy;
3084 3090      address entry_checkcast_arraycopy;
3085 3091  
3086      -    StubRoutines::_jbyte_disjoint_arraycopy  = generate_disjoint_byte_copy(false, &entry,
3087      -                                                                           "jbyte_disjoint_arraycopy");
3088      -    StubRoutines::_jbyte_arraycopy           = generate_conjoint_byte_copy(false, entry, &entry_jbyte_arraycopy,
3089      -                                                                           "jbyte_arraycopy");
3090      -    StubRoutines::_jshort_disjoint_arraycopy = generate_disjoint_short_copy(false, &entry,
3091      -                                                                            "jshort_disjoint_arraycopy");
3092      -    StubRoutines::_jshort_arraycopy          = generate_conjoint_short_copy(false, entry, &entry_jshort_arraycopy,
3093      -                                                                            "jshort_arraycopy");
3094      -    StubRoutines::_jint_disjoint_arraycopy   = generate_disjoint_int_copy(false, &entry,
3095      -                                                                          "jint_disjoint_arraycopy");
3096      -    StubRoutines::_jint_arraycopy            = generate_conjoint_int_copy(false, entry, &entry_jint_arraycopy,
3097      -                                                                          "jint_arraycopy");
3098      -    StubRoutines::_jlong_disjoint_arraycopy  = generate_disjoint_long_copy(false, &entry,
3099      -                                                                           "jlong_disjoint_arraycopy");
3100      -    StubRoutines::_jlong_arraycopy           = generate_conjoint_long_copy(false, entry, &entry_jlong_arraycopy,
3101      -                                                                           "jlong_arraycopy");
3102      -    StubRoutines::_oop_disjoint_arraycopy    = generate_disjoint_oop_copy(false, &entry,
3103      -                                                                          "oop_disjoint_arraycopy");
3104      -    StubRoutines::_oop_arraycopy             = generate_conjoint_oop_copy(false, entry, &entry_oop_arraycopy,
3105      -                                                                          "oop_arraycopy");
3106      -
3107      -
3108      -    StubRoutines::_arrayof_jbyte_disjoint_arraycopy  = generate_disjoint_byte_copy(true, &entry,
3109      -                                                                                   "arrayof_jbyte_disjoint_arraycopy");
3110      -    StubRoutines::_arrayof_jbyte_arraycopy           = generate_conjoint_byte_copy(true, entry, NULL,
3111      -                                                                                   "arrayof_jbyte_arraycopy");
3112      -
     3092 +    //*** jbyte
     3093 +    // Always need aligned and unaligned versions
     3094 +    StubRoutines::_jbyte_disjoint_arraycopy         = generate_disjoint_byte_copy(false, &entry,
     3095 +                                                                                  "jbyte_disjoint_arraycopy");
     3096 +    StubRoutines::_jbyte_arraycopy                  = generate_conjoint_byte_copy(false, entry,
     3097 +                                                                                  &entry_jbyte_arraycopy,
     3098 +                                                                                  "jbyte_arraycopy");
     3099 +    StubRoutines::_arrayof_jbyte_disjoint_arraycopy = generate_disjoint_byte_copy(true, &entry,
     3100 +                                                                                  "arrayof_jbyte_disjoint_arraycopy");
     3101 +    StubRoutines::_arrayof_jbyte_arraycopy          = generate_conjoint_byte_copy(true, entry, NULL,
     3102 +                                                                                  "arrayof_jbyte_arraycopy");
     3103 +
     3104 +    //*** jshort
     3105 +    // Always need aligned and unaligned versions
     3106 +    StubRoutines::_jshort_disjoint_arraycopy         = generate_disjoint_short_copy(false, &entry,
     3107 +                                                                                    "jshort_disjoint_arraycopy");
     3108 +    StubRoutines::_jshort_arraycopy                  = generate_conjoint_short_copy(false, entry,
     3109 +                                                                                    &entry_jshort_arraycopy,
     3110 +                                                                                    "jshort_arraycopy");
3113 3111      StubRoutines::_arrayof_jshort_disjoint_arraycopy = generate_disjoint_short_copy(true, &entry,
3114 3112                                                                                      "arrayof_jshort_disjoint_arraycopy");
3115 3113      StubRoutines::_arrayof_jshort_arraycopy          = generate_conjoint_short_copy(true, entry, NULL,
3116 3114                                                                                      "arrayof_jshort_arraycopy");
3117 3115  
3118      -    StubRoutines::_arrayof_jint_disjoint_arraycopy   = generate_disjoint_int_copy(true, &entry,
3119      -                                                                                  "arrayof_jint_disjoint_arraycopy");
     3116 +    //*** jint
     3117 +    // Aligned versions
     3118 +    StubRoutines::_arrayof_jint_disjoint_arraycopy = generate_disjoint_int_copy(true, &entry,
     3119 +                                                                                "arrayof_jint_disjoint_arraycopy");
     3120 +    StubRoutines::_arrayof_jint_arraycopy          = generate_conjoint_int_copy(true, entry, &entry_jint_arraycopy,
     3121 +                                                                                "arrayof_jint_arraycopy");
3120 3122  #ifdef _LP64
3121      -    // since sizeof(jint) < sizeof(HeapWord), there's a different flavor:
3122      -    StubRoutines::_arrayof_jint_arraycopy     = generate_conjoint_int_copy(true, entry, NULL, "arrayof_jint_arraycopy");
3123      -  #else
3124      -    StubRoutines::_arrayof_jint_arraycopy     = StubRoutines::_jint_arraycopy;
     3123 +    // In 64-bit we need both aligned and unaligned versions of jint arraycopy.
     3124 +    // entry_jint_arraycopy always points to the unaligned version (note that we overwrite it).
     3125 +    StubRoutines::_jint_disjoint_arraycopy         = generate_disjoint_int_copy(false, &entry,
     3126 +                                                                                "jint_disjoint_arraycopy");
     3127 +    StubRoutines::_jint_arraycopy                  = generate_conjoint_int_copy(false, entry,
     3128 +                                                                                &entry_jint_arraycopy,
     3129 +                                                                                "jint_arraycopy");
     3130 +#else
     3131 +    // In 32-bit, jints are always HeapWordSize aligned, so always use the aligned version
     3132 +    // (in fact, in 32-bit we always have a pre-loop part even in the aligned version,
     3133 +    //  because it uses 64-bit loads/stores, so the aligned flag is actually ignored).
     3134 +    StubRoutines::_jint_disjoint_arraycopy = StubRoutines::_arrayof_jint_disjoint_arraycopy;
     3135 +    StubRoutines::_jint_arraycopy          = StubRoutines::_arrayof_jint_arraycopy;
3125 3136  #endif
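(Editorial note: the jint registration above can be summarized by this compilable toy, assuming only what the comments state: on LP64 a heap word is 8 bytes, so a jint copy may start at an address that is only 4-byte aligned and needs its own unaligned stub, while on 32-bit every jint slot is already heap-word aligned and the plain entries simply alias the aligned stubs. All identifiers are invented.)

    // Toy model of the jint stub registration; stub addresses are replaced by names.
    struct ToyStubRoutines {
      const char* arrayof_jint_disjoint_arraycopy;
      const char* arrayof_jint_arraycopy;
      const char* jint_disjoint_arraycopy;
      const char* jint_arraycopy;
    };

    static void register_jint_stubs(ToyStubRoutines& s) {
      s.arrayof_jint_disjoint_arraycopy = "aligned disjoint stub";
      s.arrayof_jint_arraycopy          = "aligned conjoint stub";
    #ifdef _LP64
      // 8-byte heap words: jint elements may be only 4-byte aligned, so
      // separate unaligned flavors are generated.
      s.jint_disjoint_arraycopy = "unaligned disjoint stub";
      s.jint_arraycopy          = "unaligned conjoint stub";
    #else
      // 4-byte heap words: every jint is heap-word aligned, alias the aligned stubs.
      s.jint_disjoint_arraycopy = s.arrayof_jint_disjoint_arraycopy;
      s.jint_arraycopy          = s.arrayof_jint_arraycopy;
    #endif
    }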
3126 3137  
3127      -    StubRoutines::_arrayof_jlong_disjoint_arraycopy  = generate_disjoint_long_copy(true, NULL,
3128      -                                                                                   "arrayof_jlong_disjoint_arraycopy");
3129      -    StubRoutines::_arrayof_oop_disjoint_arraycopy    =  generate_disjoint_oop_copy(true, NULL,
3130      -                                                                                   "arrayof_oop_disjoint_arraycopy");
3131 3138  
3132      -    StubRoutines::_arrayof_jlong_arraycopy    = StubRoutines::_jlong_arraycopy;
3133      -    StubRoutines::_arrayof_oop_arraycopy      = StubRoutines::_oop_arraycopy;
     3139 +    //*** jlong
     3140 +    // It is always aligned
     3141 +    StubRoutines::_arrayof_jlong_disjoint_arraycopy = generate_disjoint_long_copy(true, &entry,
     3142 +                                                                                  "arrayof_jlong_disjoint_arraycopy");
     3143 +    StubRoutines::_arrayof_jlong_arraycopy          = generate_conjoint_long_copy(true, entry, &entry_jlong_arraycopy,
     3144 +                                                                                  "arrayof_jlong_arraycopy");
     3145 +    StubRoutines::_jlong_disjoint_arraycopy         = StubRoutines::_arrayof_jlong_disjoint_arraycopy;
     3146 +    StubRoutines::_jlong_arraycopy                  = StubRoutines::_arrayof_jlong_arraycopy;
     3147 +
     3148 +
     3149 +    //*** oops
     3150 +    // Aligned versions
     3151 +    StubRoutines::_arrayof_oop_disjoint_arraycopy        = generate_disjoint_oop_copy(true, &entry,
     3152 +                                                                                      "arrayof_oop_disjoint_arraycopy");
     3153 +    StubRoutines::_arrayof_oop_arraycopy                 = generate_conjoint_oop_copy(true, entry, &entry_oop_arraycopy,
     3154 +                                                                                      "arrayof_oop_arraycopy");
     3155 +    // Aligned versions without pre-barriers
     3156 +    StubRoutines::_arrayof_oop_disjoint_arraycopy_no_pre = generate_disjoint_oop_copy(true, &entry,
     3157 +                                                                                      "arrayof_oop_disjoint_arraycopy_no_pre", false);
     3158 +    StubRoutines::_arrayof_oop_arraycopy_no_pre          = generate_conjoint_oop_copy(true, entry, NULL,
     3159 +                                                                                      "arrayof_oop_arraycopy_no_pre", false);
     3160 +#ifdef _LP64
     3161 +    if (UseCompressedOops) {
     3162 +      // With compressed oops we need unaligned versions; note that we overwrite entry_oop_arraycopy.
     3163 +      StubRoutines::_oop_disjoint_arraycopy            = generate_disjoint_oop_copy(false, &entry,
     3164 +                                                                                    "oop_disjoint_arraycopy");
     3165 +      StubRoutines::_oop_arraycopy                     = generate_conjoint_oop_copy(false, entry, &entry_oop_arraycopy,
     3166 +                                                                                    "oop_arraycopy");
     3167 +      // Unaligned versions without pre-barriers
     3168 +      StubRoutines::_oop_disjoint_arraycopy_no_pre     = generate_disjoint_oop_copy(false, &entry,
     3169 +                                                                                    "oop_disjoint_arraycopy_no_pre", false);
     3170 +      StubRoutines::_oop_arraycopy_no_pre              = generate_conjoint_oop_copy(false, entry, NULL,
     3171 +                                                                                    "oop_arraycopy_no_pre", false);
     3172 +    } else
     3173 +#endif
     3174 +    {
     3175 +      // oop arraycopy is always aligned on 32-bit, and on 64-bit without compressed oops
     3176 +      StubRoutines::_oop_disjoint_arraycopy            = StubRoutines::_arrayof_oop_disjoint_arraycopy;
     3177 +      StubRoutines::_oop_arraycopy                     = StubRoutines::_arrayof_oop_arraycopy;
     3178 +      StubRoutines::_oop_disjoint_arraycopy_no_pre     = StubRoutines::_arrayof_oop_disjoint_arraycopy_no_pre;
     3179 +      StubRoutines::_oop_arraycopy_no_pre              = StubRoutines::_arrayof_oop_arraycopy_no_pre;
     3180 +    }
     3181 +
     3182 +    StubRoutines::_checkcast_arraycopy        = generate_checkcast_copy("checkcast_arraycopy", &entry_checkcast_arraycopy);
     3183 +    StubRoutines::_checkcast_arraycopy_no_pre = generate_checkcast_copy("checkcast_arraycopy_no_pre", NULL, false);
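(Editorial note: a minimal sketch, outside HotSpot, of the defaulted-parameter pattern the registrations above rely on: existing calls to the oop and checkcast copy generators compile unchanged, while the new *_no_pre registrations pass false explicitly to suppress the pre-barrier. The emit_* names stand in for gen_write_ref_array_pre_barrier and the copy cores; they are not real routines.)

    #include <cstdio>

    static void emit_pre_barrier() { std::printf("  pre-barrier emitted\n"); }
    static void emit_copy_loop()   { std::printf("  copy loop emitted\n"); }

    // Stand-in generator: the defaulted flag mirrors the new 'need_pre_barrier'
    // parameter, so pre-existing call sites are unaffected.
    static void generate_oop_copy_stub(const char* name, bool need_pre_barrier = true) {
      std::printf("%s:\n", name);
      if (need_pre_barrier) {
        emit_pre_barrier();
      }
      emit_copy_loop();
    }

    int main() {
      generate_oop_copy_stub("arrayof_oop_arraycopy");               // default: barrier kept
      generate_oop_copy_stub("arrayof_oop_arraycopy_no_pre", false); // barrier suppressed
      return 0;
    }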
3134 3184  
3135      -    StubRoutines::_checkcast_arraycopy = generate_checkcast_copy("checkcast_arraycopy", &entry_checkcast_arraycopy);
3136 3185      StubRoutines::_unsafe_arraycopy    = generate_unsafe_copy("unsafe_arraycopy",
3137 3186                                                                entry_jbyte_arraycopy,
3138 3187                                                                entry_jshort_arraycopy,
3139 3188                                                                entry_jint_arraycopy,
3140 3189                                                                entry_jlong_arraycopy);
3141 3190      StubRoutines::_generic_arraycopy   = generate_generic_copy("generic_arraycopy",
3142 3191                                                                 entry_jbyte_arraycopy,
3143 3192                                                                 entry_jshort_arraycopy,
3144 3193                                                                 entry_jint_arraycopy,
3145 3194                                                                 entry_oop_arraycopy,
... 128 lines elided ...