
src/hotspot/cpu/aarch64/stubGenerator_aarch64.cpp

--- old/src/hotspot/cpu/aarch64/stubGenerator_aarch64.cpp

2348         break;
2349       case T_SHORT:
2350         __ tbz(count, 0, L_fill_4);
2351         __ strh(value, Address(__ post(to, 2)));
2352         __ bind(L_fill_4);
2353         __ tbz(count, 1, L_exit2);
2354         __ strw(value, Address(to));
2355         break;
2356       case T_INT:
2357         __ cbzw(count, L_exit2);
2358         __ strw(value, Address(to));
2359         break;
2360       default: ShouldNotReachHere();
2361     }
2362     __ bind(L_exit2);
2363     __ leave();
2364     __ ret(lr);
2365     return start;
2366   }
2367
2368   void generate_arraycopy_stubs() {
2369     address entry;
2370     address entry_jbyte_arraycopy;
2371     address entry_jshort_arraycopy;
2372     address entry_jint_arraycopy;
2373     address entry_oop_arraycopy;
2374     address entry_jlong_arraycopy;
2375     address entry_checkcast_arraycopy;
2376 
2377     generate_copy_longs(copy_f, r0, r1, rscratch2, copy_forwards);
2378     generate_copy_longs(copy_b, r0, r1, rscratch2, copy_backwards);
2379 
2380     StubRoutines::aarch64::_zero_blocks = generate_zero_blocks();
2381 
2382     //*** jbyte
2383     // Always need aligned and unaligned versions
2384     StubRoutines::_jbyte_disjoint_arraycopy         = generate_disjoint_byte_copy(false, &entry,
2385                                                                                   "jbyte_disjoint_arraycopy");
2386     StubRoutines::_jbyte_arraycopy                  = generate_conjoint_byte_copy(false, entry,
2387                                                                                   &entry_jbyte_arraycopy,


5807     if (UseMontgomeryMultiplyIntrinsic) {
5808       StubCodeMark mark(this, "StubRoutines", "montgomeryMultiply");
5809       MontgomeryMultiplyGenerator g(_masm, /*squaring*/false);
5810       StubRoutines::_montgomeryMultiply = g.generate_multiply();
5811     }
5812 
5813     if (UseMontgomerySquareIntrinsic) {
5814       StubCodeMark mark(this, "StubRoutines", "montgomerySquare");
5815       MontgomeryMultiplyGenerator g(_masm, /*squaring*/true);
5816       // We use generate_multiply() rather than generate_square()
5817       // because it's faster for the sizes of modulus we care about.
5818       StubRoutines::_montgomerySquare = g.generate_multiply();
5819     }
5820 
5821 #ifndef BUILTIN_SIM
5822     // generate GHASH intrinsics code
5823     if (UseGHASHIntrinsics) {
5824       StubRoutines::_ghash_processBlocks = generate_ghash_processBlocks();
5825     }
5826
5827     if (UseAESIntrinsics) {
5828       StubRoutines::_aescrypt_encryptBlock = generate_aescrypt_encryptBlock();
5829       StubRoutines::_aescrypt_decryptBlock = generate_aescrypt_decryptBlock();
5830       StubRoutines::_cipherBlockChaining_encryptAESCrypt = generate_cipherBlockChaining_encryptAESCrypt();
5831       StubRoutines::_cipherBlockChaining_decryptAESCrypt = generate_cipherBlockChaining_decryptAESCrypt();
5832     }
5833 
5834     if (UseSHA1Intrinsics) {
5835       StubRoutines::_sha1_implCompress     = generate_sha1_implCompress(false,   "sha1_implCompress");
5836       StubRoutines::_sha1_implCompressMB   = generate_sha1_implCompress(true,    "sha1_implCompressMB");
5837     }
5838     if (UseSHA256Intrinsics) {
5839       StubRoutines::_sha256_implCompress   = generate_sha256_implCompress(false, "sha256_implCompress");
5840       StubRoutines::_sha256_implCompressMB = generate_sha256_implCompress(true,  "sha256_implCompressMB");
5841     }
5842 
5843     // generate Adler32 intrinsics code
5844     if (UseAdler32Intrinsics) {
5845       StubRoutines::_updateBytesAdler32 = generate_updateBytesAdler32();
5846     }

+++ new/src/hotspot/cpu/aarch64/stubGenerator_aarch64.cpp

2348         break;
2349       case T_SHORT:
2350         __ tbz(count, 0, L_fill_4);
2351         __ strh(value, Address(__ post(to, 2)));
2352         __ bind(L_fill_4);
2353         __ tbz(count, 1, L_exit2);
2354         __ strw(value, Address(to));
2355         break;
2356       case T_INT:
2357         __ cbzw(count, L_exit2);
2358         __ strw(value, Address(to));
2359         break;
2360       default: ShouldNotReachHere();
2361     }
2362     __ bind(L_exit2);
2363     __ leave();
2364     __ ret(lr);
2365     return start;
2366   }
2367 
2368   address generate_data_cache_writeback() {
2369     const Register line        = c_rarg0;  // address of line to write back
2370 
2371     __ align(CodeEntryAlignment);
2372 
2373     StubCodeMark mark(this, "StubRoutines", "_data_cache_writeback");
2374 
2375     address start = __ pc();
2376     __ enter();
2377     __ cache_wb(Address(line, 0));
2378     __ leave();
2379     __ ret(lr);
2380 
2381     return start;
2382   }
2383 
2384   address generate_data_cache_writeback_sync() {
2385     const Register kind       = c_rarg0;  // pre or post sync (unused)
2386 
2387     __ align(CodeEntryAlignment);
2388 
2389     StubCodeMark mark(this, "StubRoutines", "_data_cache_writeback_sync");
2390 
2391     address start = __ pc();
2392     __ enter();
2393     __ cache_wbsync();
2394     __ leave();
2395     __ ret(lr);
2396 
2397     return start;
2398   }
2399 
2400   void generate_arraycopy_stubs() {
2401     address entry;
2402     address entry_jbyte_arraycopy;
2403     address entry_jshort_arraycopy;
2404     address entry_jint_arraycopy;
2405     address entry_oop_arraycopy;
2406     address entry_jlong_arraycopy;
2407     address entry_checkcast_arraycopy;
2408 
2409     generate_copy_longs(copy_f, r0, r1, rscratch2, copy_forwards);
2410     generate_copy_longs(copy_b, r0, r1, rscratch2, copy_backwards);
2411 
2412     StubRoutines::aarch64::_zero_blocks = generate_zero_blocks();
2413 
2414     //*** jbyte
2415     // Always need aligned and unaligned versions
2416     StubRoutines::_jbyte_disjoint_arraycopy         = generate_disjoint_byte_copy(false, &entry,
2417                                                                                   "jbyte_disjoint_arraycopy");
2418     StubRoutines::_jbyte_arraycopy                  = generate_conjoint_byte_copy(false, entry,
2419                                                                                   &entry_jbyte_arraycopy,
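
The new generate_data_cache_writeback and generate_data_cache_writeback_sync stubs (lines 2368-2398 above) each wrap a single MacroAssembler operation: cache_wb(Address) writes back one data cache line, and cache_wbsync() orders the completed writebacks. As a rough sketch of the semantics the generated code is expected to provide, here is a minimal user-space equivalent in C++ with GCC inline assembly; the 64-byte line size and the use of dc cvac (clean to point of coherency) are assumptions, and a core with ARMv8.2 DCPoP would use dc cvap to reach the point of persistence instead.

    #include <cstddef>
    #include <cstdint>

    // Clean every cache line covering [addr, addr + len), then fence.
    // This mirrors what the stubs do: one cache_wb per line, followed
    // by a single cache_wbsync once the loop is done.
    static void writeback_region(const void* addr, size_t len) {
      const uintptr_t line = 64;  // assumed AArch64 D-cache line size
      uintptr_t p   = reinterpret_cast<uintptr_t>(addr) & ~(line - 1);
      uintptr_t end = reinterpret_cast<uintptr_t>(addr) + len;
      for (; p < end; p += line) {
        asm volatile("dc cvac, %0" : : "r"(p) : "memory");  // clean one line
      }
      asm volatile("dmb ish" : : : "memory");  // order the writebacks
    }
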


5839     if (UseMontgomeryMultiplyIntrinsic) {
5840       StubCodeMark mark(this, "StubRoutines", "montgomeryMultiply");
5841       MontgomeryMultiplyGenerator g(_masm, /*squaring*/false);
5842       StubRoutines::_montgomeryMultiply = g.generate_multiply();
5843     }
5844 
5845     if (UseMontgomerySquareIntrinsic) {
5846       StubCodeMark mark(this, "StubRoutines", "montgomerySquare");
5847       MontgomeryMultiplyGenerator g(_masm, /*squaring*/true);
5848       // We use generate_multiply() rather than generate_square()
5849       // because it's faster for the sizes of modulus we care about.
5850       StubRoutines::_montgomerySquare = g.generate_multiply();
5851     }
5852 
5853 #ifndef BUILTIN_SIM
5854     // generate GHASH intrinsics code
5855     if (UseGHASHIntrinsics) {
5856       StubRoutines::_ghash_processBlocks = generate_ghash_processBlocks();
5857     }
5858 
5859     // data cache line writeback
5860     StubRoutines::_data_cache_writeback = generate_data_cache_writeback();
5861     StubRoutines::_data_cache_writeback_sync = generate_data_cache_writeback_sync();
5862
5863     if (UseAESIntrinsics) {
5864       StubRoutines::_aescrypt_encryptBlock = generate_aescrypt_encryptBlock();
5865       StubRoutines::_aescrypt_decryptBlock = generate_aescrypt_decryptBlock();
5866       StubRoutines::_cipherBlockChaining_encryptAESCrypt = generate_cipherBlockChaining_encryptAESCrypt();
5867       StubRoutines::_cipherBlockChaining_decryptAESCrypt = generate_cipherBlockChaining_decryptAESCrypt();
5868     }
5869 
5870     if (UseSHA1Intrinsics) {
5871       StubRoutines::_sha1_implCompress     = generate_sha1_implCompress(false,   "sha1_implCompress");
5872       StubRoutines::_sha1_implCompressMB   = generate_sha1_implCompress(true,    "sha1_implCompressMB");
5873     }
5874     if (UseSHA256Intrinsics) {
5875       StubRoutines::_sha256_implCompress   = generate_sha256_implCompress(false, "sha256_implCompress");
5876       StubRoutines::_sha256_implCompressMB = generate_sha256_implCompress(true,  "sha256_implCompressMB");
5877     }
5878 
5879     // generate Adler32 intrinsics code
5880     if (UseAdler32Intrinsics) {
5881       StubRoutines::_updateBytesAdler32 = generate_updateBytesAdler32();
5882     }
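
Note that, unlike the crypto stubs around them, the two writeback entry points registered at lines 5860-5861 are installed unconditionally rather than behind a Use* flag, so support is presumably gated elsewhere at runtime. The expected call shape, sketched below with illustrative names (not the actual JDK API): the caller brackets a per-line loop over the writeback stub with pre and post sync calls, matching the kind argument that this revision's sync stub accepts but ignores.

    #include <cstddef>
    #include <cstdint>

    // Hypothetical signatures for the generated stubs; the real entry
    // points are StubRoutines::_data_cache_writeback(_sync) above.
    typedef void (*wb_fn)(void* line);
    typedef void (*wb_sync_fn)(int kind);  // kind unused in this revision

    // Sketch of a caller: pre-sync, write back each line, post-sync.
    static void writeback_memory(void* base, size_t len,
                                 wb_fn wb, wb_sync_fn sync) {
      const uintptr_t line = 64;  // assumed cache line size
      sync(1);                                         // pre-sync
      uintptr_t p   = reinterpret_cast<uintptr_t>(base) & ~(line - 1);
      uintptr_t end = reinterpret_cast<uintptr_t>(base) + len;
      for (; p < end; p += line) {
        wb(reinterpret_cast<void*>(p));                // one line per call
      }
      sync(0);                                         // post-sync
    }
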

