src/hotspot/cpu/aarch64/stubGenerator_aarch64.cpp

2348         break;
2349       case T_SHORT:
2350         __ tbz(count, 0, L_fill_4);
2351         __ strh(value, Address(__ post(to, 2)));
2352         __ bind(L_fill_4);
2353         __ tbz(count, 1, L_exit2);
2354         __ strw(value, Address(to));
2355         break;
2356       case T_INT:
2357         __ cbzw(count, L_exit2);
2358         __ strw(value, Address(to));
2359         break;
2360       default: ShouldNotReachHere();
2361     }
2362     __ bind(L_exit2);
2363     __ leave();
2364     __ ret(lr);
2365     return start;
2366   }
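For reference, the T_SHORT tail above stores one halfword when bit 0 of the remaining element count is set (post-incrementing the destination by 2), then one 32-bit word (two more shorts) when bit 1 is set. A minimal C++ sketch of that logic, purely as an illustration of the assembly (the helper name and signature below are made up for this note and are not part of the patch):

    #include <cstdint>
    #include <cstring>

    // 'value' holds the 16-bit fill value replicated into a 32-bit word;
    // 'count' is the number of trailing elements (0..3) left after the bulk loop.
    static void fill_tail_short(void* to, uint32_t value, uint32_t count) {
      uint8_t* p = static_cast<uint8_t*>(to);
      if (count & 1) {                       // tbz count, #0: store one halfword
        uint16_t h = static_cast<uint16_t>(value);
        std::memcpy(p, &h, sizeof h);
        p += 2;                              // matches __ post(to, 2)
      }
      if (count & 2) {                       // tbz count, #1: store one word (two shorts)
        std::memcpy(p, &value, sizeof value);
      }
    }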
2367 
2368   void generate_arraycopy_stubs() {
2369     address entry;
2370     address entry_jbyte_arraycopy;
2371     address entry_jshort_arraycopy;
2372     address entry_jint_arraycopy;
2373     address entry_oop_arraycopy;
2374     address entry_jlong_arraycopy;
2375     address entry_checkcast_arraycopy;
2376 
2377     generate_copy_longs(copy_f, r0, r1, rscratch2, copy_forwards);
2378     generate_copy_longs(copy_b, r0, r1, rscratch2, copy_backwards);
2379 
2380     StubRoutines::aarch64::_zero_blocks = generate_zero_blocks();
2381 
2382     //*** jbyte
2383     // Always need aligned and unaligned versions
2384     StubRoutines::_jbyte_disjoint_arraycopy         = generate_disjoint_byte_copy(false, &entry,
2385                                                                                   "jbyte_disjoint_arraycopy");
2386     StubRoutines::_jbyte_arraycopy                  = generate_conjoint_byte_copy(false, entry,
2387                                                                                   &entry_jbyte_arraycopy,


5807     if (UseMontgomeryMultiplyIntrinsic) {
5808       StubCodeMark mark(this, "StubRoutines", "montgomeryMultiply");
5809       MontgomeryMultiplyGenerator g(_masm, /*squaring*/false);
5810       StubRoutines::_montgomeryMultiply = g.generate_multiply();
5811     }
5812 
5813     if (UseMontgomerySquareIntrinsic) {
5814       StubCodeMark mark(this, "StubRoutines", "montgomerySquare");
5815       MontgomeryMultiplyGenerator g(_masm, /*squaring*/true);
5816       // We use generate_multiply() rather than generate_square()
5817       // because it's faster for the sizes of modulus we care about.
5818       StubRoutines::_montgomerySquare = g.generate_multiply();
5819     }
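The montgomeryMultiply/montgomerySquare stubs are built around word-level Montgomery reduction. As a reminder of the arithmetic they rely on, here is a single-word REDC in plain C++ (an illustration only, not taken from the patch; it uses the GCC/Clang __int128 extension and assumes n < 2^63 so the intermediate sum fits in 128 bits):

    #include <cstdint>
    using u128 = unsigned __int128;

    // Computes t * R^-1 mod n for R = 2^64, given odd n and
    // n_prime = -n^-1 mod R (precomputed), with t < n * R.
    static uint64_t mont_reduce(u128 t, uint64_t n, uint64_t n_prime) {
      uint64_t m = (uint64_t)t * n_prime;    // m = ((t mod R) * n') mod R
      u128 u = (t + (u128)m * n) >> 64;      // (t + m*n) is exactly divisible by R
      return (u >= n) ? (uint64_t)(u - n) : (uint64_t)u;
    }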
5820 
5821 #ifndef BUILTIN_SIM
5822     // generate GHASH intrinsics code
5823     if (UseGHASHIntrinsics) {
5824       StubRoutines::_ghash_processBlocks = generate_ghash_processBlocks();
5825     }
5826 
5827     if (UseAESIntrinsics) {
5828       StubRoutines::_aescrypt_encryptBlock = generate_aescrypt_encryptBlock();
5829       StubRoutines::_aescrypt_decryptBlock = generate_aescrypt_decryptBlock();
5830       StubRoutines::_cipherBlockChaining_encryptAESCrypt = generate_cipherBlockChaining_encryptAESCrypt();
5831       StubRoutines::_cipherBlockChaining_decryptAESCrypt = generate_cipherBlockChaining_decryptAESCrypt();
5832     }
5833 
5834     if (UseSHA1Intrinsics) {
5835       StubRoutines::_sha1_implCompress     = generate_sha1_implCompress(false,   "sha1_implCompress");
5836       StubRoutines::_sha1_implCompressMB   = generate_sha1_implCompress(true,    "sha1_implCompressMB");
5837     }
5838     if (UseSHA256Intrinsics) {
5839       StubRoutines::_sha256_implCompress   = generate_sha256_implCompress(false, "sha256_implCompress");
5840       StubRoutines::_sha256_implCompressMB = generate_sha256_implCompress(true,  "sha256_implCompressMB");
5841     }
5842 
5843     // generate Adler32 intrinsics code
5844     if (UseAdler32Intrinsics) {
5845       StubRoutines::_updateBytesAdler32 = generate_updateBytesAdler32();
5846     }




2348         break;
2349       case T_SHORT:
2350         __ tbz(count, 0, L_fill_4);
2351         __ strh(value, Address(__ post(to, 2)));
2352         __ bind(L_fill_4);
2353         __ tbz(count, 1, L_exit2);
2354         __ strw(value, Address(to));
2355         break;
2356       case T_INT:
2357         __ cbzw(count, L_exit2);
2358         __ strw(value, Address(to));
2359         break;
2360       default: ShouldNotReachHere();
2361     }
2362     __ bind(L_exit2);
2363     __ leave();
2364     __ ret(lr);
2365     return start;
2366   }
2367 
2368   address generate_data_cache_writeback() {
2369     const Register line        = c_rarg0;  // address of line to write back
2370 
2371     __ align(CodeEntryAlignment);
2372 
2373     StubCodeMark mark(this, "StubRoutines", "_data_cache_writeback");
2374 
2375     address start = __ pc();
2376     __ enter();
2377     __ cache_wb(Address(line, 0));
2378     __ leave();
2379     __ ret(lr);
2380 
2381     return start;
2382   }
2383 
2384   address generate_data_cache_writeback_sync() {
2385     const Register is_pre     = c_rarg0;  // pre or post sync
2386 
2387     __ align(CodeEntryAlignment);
2388 
2389     StubCodeMark mark(this, "StubRoutines", "_data_cache_writeback_sync");
2390 
2391     // pre wbsync is a no-op
2392     // post wbsync translates to an sfence
2393 
2394     Label skip;
2395     address start = __ pc();
2396     __ enter();
2397     __ cbnz(is_pre, skip);
2398     __ cache_wbsync(false);
2399     __ bind(skip);
2400     __ leave();
2401     __ ret(lr);
2402 
2403     return start;
2404   }
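Functionally, the two new stubs amount to a cache-line clean plus an ordering barrier. A rough user-space equivalent for Linux/AArch64 (an illustration only, not the generated code; it assumes EL0 is allowed to execute DC CVAC and uses a full data synchronization barrier for the post-sync case):

    static inline void data_cache_writeback(const void* line) {
      // Clean the cache line containing 'line' back to the point of coherency.
      asm volatile("dc cvac, %0" : : "r"(line) : "memory");
    }

    static inline void data_cache_writeback_sync(bool is_pre) {
      // Pre-sync is a no-op; post-sync orders the preceding cache cleans.
      if (!is_pre) {
        asm volatile("dsb ish" : : : "memory");
      }
    }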
2405 
2406   void generate_arraycopy_stubs() {
2407     address entry;
2408     address entry_jbyte_arraycopy;
2409     address entry_jshort_arraycopy;
2410     address entry_jint_arraycopy;
2411     address entry_oop_arraycopy;
2412     address entry_jlong_arraycopy;
2413     address entry_checkcast_arraycopy;
2414 
2415     generate_copy_longs(copy_f, r0, r1, rscratch2, copy_forwards);
2416     generate_copy_longs(copy_b, r0, r1, rscratch2, copy_backwards);
2417 
2418     StubRoutines::aarch64::_zero_blocks = generate_zero_blocks();
2419 
2420     //*** jbyte
2421     // Always need aligned and unaligned versions
2422     StubRoutines::_jbyte_disjoint_arraycopy         = generate_disjoint_byte_copy(false, &entry,
2423                                                                                   "jbyte_disjoint_arraycopy");
2424     StubRoutines::_jbyte_arraycopy                  = generate_conjoint_byte_copy(false, entry,
2425                                                                                   &entry_jbyte_arraycopy,


5845     if (UseMontgomeryMultiplyIntrinsic) {
5846       StubCodeMark mark(this, "StubRoutines", "montgomeryMultiply");
5847       MontgomeryMultiplyGenerator g(_masm, /*squaring*/false);
5848       StubRoutines::_montgomeryMultiply = g.generate_multiply();
5849     }
5850 
5851     if (UseMontgomerySquareIntrinsic) {
5852       StubCodeMark mark(this, "StubRoutines", "montgomerySquare");
5853       MontgomeryMultiplyGenerator g(_masm, /*squaring*/true);
5854       // We use generate_multiply() rather than generate_square()
5855       // because it's faster for the sizes of modulus we care about.
5856       StubRoutines::_montgomerySquare = g.generate_multiply();
5857     }
5858 
5859 #ifndef BUILTIN_SIM
5860     // generate GHASH intrinsics code
5861     if (UseGHASHIntrinsics) {
5862       StubRoutines::_ghash_processBlocks = generate_ghash_processBlocks();
5863     }
5864 
5865     // data cache line writeback
5866     StubRoutines::_data_cache_writeback = generate_data_cache_writeback();
5867     StubRoutines::_data_cache_writeback_sync = generate_data_cache_writeback_sync();
5868 
5869     if (UseAESIntrinsics) {
5870       StubRoutines::_aescrypt_encryptBlock = generate_aescrypt_encryptBlock();
5871       StubRoutines::_aescrypt_decryptBlock = generate_aescrypt_decryptBlock();
5872       StubRoutines::_cipherBlockChaining_encryptAESCrypt = generate_cipherBlockChaining_encryptAESCrypt();
5873       StubRoutines::_cipherBlockChaining_decryptAESCrypt = generate_cipherBlockChaining_decryptAESCrypt();
5874     }
5875 
5876     if (UseSHA1Intrinsics) {
5877       StubRoutines::_sha1_implCompress     = generate_sha1_implCompress(false,   "sha1_implCompress");
5878       StubRoutines::_sha1_implCompressMB   = generate_sha1_implCompress(true,    "sha1_implCompressMB");
5879     }
5880     if (UseSHA256Intrinsics) {
5881       StubRoutines::_sha256_implCompress   = generate_sha256_implCompress(false, "sha256_implCompress");
5882       StubRoutines::_sha256_implCompressMB = generate_sha256_implCompress(true,  "sha256_implCompressMB");
5883     }
5884 
5885     // generate Adler32 intrinsics code
5886     if (UseAdler32Intrinsics) {
5887       StubRoutines::_updateBytesAdler32 = generate_updateBytesAdler32();
5888     }

