
src/cpu/sparc/vm/stubGenerator_sparc.cpp

   1 /*
   2  * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *


4771       __ faligndata(as_FloatRegister(i*2 + 16), as_FloatRegister(i*2 + 18), as_FloatRegister(i*2 + 16));
4772     }
4773     __ sha512();
4774     if (multi_block) {
4775       __ add(ofs, 128, ofs);
4776       __ add(buf, 128, buf);
4777       __ cmp_and_brx_short(ofs, limit, Assembler::lessEqual, Assembler::pt, L_sha512_unaligned_input_loop);
4778       __ mov(ofs, O0); // to be returned
4779     }
4780 
4781     // store F0-F14 into state and return
4782     for (i = 0; i < 7; i++) {
4783       __ stf(FloatRegisterImpl::D, as_FloatRegister(i*2), state, i*8);
4784     }
4785     __ retl();
4786     __ delayed()->stf(FloatRegisterImpl::D, F14, state, 0x38);
4787 
4788     return start;
4789   }
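
A minimal sketch, not taken from the stub itself: the multi_block path above advances ofs and buf by one 128-byte block per iteration, loops while ofs <= limit, and leaves the updated ofs in O0 before F0-F14 are stored back to state. The C++ below expresses that contract; sha512_block_fn is a hypothetical callback standing in for the SHA512 opcode and its surrounding loads.

    #include <cstdint>

    // Hypothetical one-block (128-byte) compression step; updates state in place,
    // as the stub updates F0-F14.
    typedef void (*sha512_block_fn)(uint64_t state[8], const uint8_t block[128]);

    int sha512_multi_block(sha512_block_fn compress, uint64_t state[8],
                           const uint8_t* buf, int ofs, int limit) {
      do {
        compress(state, buf);        // one 128-byte block
        ofs += 128;                  // add(ofs, 128, ofs)
        buf += 128;                  // add(buf, 128, buf)
      } while (ofs <= limit);        // cmp_and_brx_short(ofs, limit, lessEqual, ...)
      return ofs;                    // the value the stub returns in O0
    }
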
4790 
4791   /* Single and multi-block ghash operations */
4792   address generate_ghash_processBlocks() {
4793       __ align(CodeEntryAlignment);
4794       Label L_ghash_loop, L_aligned, L_main;
4795       StubCodeMark mark(this, "StubRoutines", "ghash_processBlocks");
4796       address start = __ pc();
4797 
4798       Register state = I0;
4799       Register subkeyH = I1;
4800       Register data = I2;
4801       Register len = I3;
4802 
4803       __ save_frame(0);
4804 
4805       __ ldx(state, 0, O0);
4806       __ ldx(state, 8, O1);
4807 
4808       // Loop label for multiblock operations
4809       __ BIND(L_ghash_loop);
4810 
4811       // Check if 'data' is unaligned
4812       __ andcc(data, 7, G1);
4813       __ br(Assembler::zero, false, Assembler::pt, L_aligned);
4814       __ delayed()->nop();
4815 
4816       Register left_shift = L1;
4817       Register right_shift = L2;
4818       Register data_ptr = L3;
4819 
4820       // Get left and right shift values in bits
4821       __ sll(G1, LogBitsPerByte, left_shift);
4822       __ mov(64, right_shift);
4823       __ sub(right_shift, left_shift, right_shift);
4824 
4825       // Align to read 'data'
4826       __ sub(data, G1, data_ptr);
4827 
4828       // Load first 8 bytes of 'data'
4829       __ ldx(data_ptr, 0, O4);
4830       __ sllx(O4, left_shift, O4);
4831       __ ldx(data_ptr, 8, O5);
4832       __ srlx(O5, right_shift, G4);
4833       __ bset(G4, O4);
4834 
4835       // Load second 8 bytes of 'data'
4836       __ sllx(O5, left_shift, O5);
4837       __ ldx(data_ptr, 16, G4);
4838       __ srlx(G4, right_shift, G4);
4839       __ ba(L_main);
4840       __ delayed()->bset(G4, O5);
4841 
4842       // If 'data' is aligned, load normally
4843       __ BIND(L_aligned);
4844       __ ldx(data, 0, O4);
4845       __ ldx(data, 8, O5);
4846 
4847       __ BIND(L_main);
4848       __ ldx(subkeyH, 0, O2);
4849       __ ldx(subkeyH, 8, O3);
4850 
4851       __ xor3(O0, O4, O0);
4852       __ xor3(O1, O5, O1);
4853 
4854       __ xmulxhi(O0, O3, G3);
4855       __ xmulx(O0, O2, O5);
4856       __ xmulxhi(O1, O2, G4);
4857       __ xmulxhi(O1, O3, G5);
4858       __ xmulx(O0, O3, G1);
4859       __ xmulx(O1, O3, G2);
4860       __ xmulx(O1, O2, O3);
4861       __ xmulxhi(O0, O2, O4);
4862 
4863       __ mov(0xE1, O0);
4864       __ sllx(O0, 56, O0);
4865 
4866       __ xor3(O5, G3, O5);
4867       __ xor3(O5, G4, O5);
4868       __ xor3(G5, G1, G1);
4869       __ xor3(G1, O3, G1);
4870       __ srlx(G2, 63, O1);
4871       __ srlx(G1, 63, G3);
4872       __ sllx(G2, 63, O3);
4873       __ sllx(G2, 58, O2);
4874       __ xor3(O3, O2, O2);
4875 
4876       __ sllx(G1, 1, G1);
4877       __ or3(G1, O1, G1);
4878 
4879       __ xor3(G1, O2, G1);
4880 
4881       __ sllx(G2, 1, G2);
4882 
4883       __ xmulxhi(G1, O0, O1);
4884       __ xmulx(G1, O0, O2);
4885       __ xmulxhi(G2, O0, O3);
4886       __ xmulx(G2, O0, G1);
4887 
4888       __ xor3(O4, O1, O4);
4889       __ xor3(O5, O2, O5);
4890       __ xor3(O5, O3, O5);
4891 
4892       __ sllx(O4, 1, O2);
4893       __ srlx(O5, 63, O3);
4894 
4895       __ or3(O2, O3, O0);
4896 
4897       __ sllx(O5, 1, O1);
4898       __ srlx(G1, 63, O2);
4899       __ or3(O1, O2, O1);
4900       __ xor3(O1, G3, O1);
4901 
4902       __ deccc(len);
4903       __ br(Assembler::notZero, true, Assembler::pt, L_ghash_loop);
4904       __ delayed()->add(data, 16, data);
4905 
4906       __ stx(O0, I0, 0);
4907       __ stx(O1, I0, 8);
4908 
4909       __ ret();
4910       __ delayed()->restore();
4911 
4912       return start;
4913   }
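
When andcc(data, 7, G1) finds the input misaligned, the stub rounds data down to an 8-byte boundary, issues three aligned ldx loads, and splices the 16 payload bytes back together with sllx/srlx by misalignment*8 bits. The sketch below is a hedged portable rendering of that splice only; load_block_16 is an illustrative name, and big-endian word contents are assumed, matching SPARC's ldx.

    #include <cstdint>
    #include <cstring>

    // The third aligned load may read up to 7 bytes past the 16-byte block, but
    // stays within the aligned word holding its last byte, like ldx(data_ptr, 16, G4).
    static void load_block_16(const uint8_t* data, uint64_t& w0, uint64_t& w1) {
      uintptr_t misalign = (uintptr_t)data & 7;      // andcc(data, 7, G1)
      if (misalign == 0) {                           // L_aligned path
        memcpy(&w0, data, 8);
        memcpy(&w1, data + 8, 8);
        return;
      }
      const uint8_t* p = data - misalign;            // sub(data, G1, data_ptr)
      unsigned lshift = (unsigned)(misalign * 8);    // sll(G1, LogBitsPerByte, left_shift)
      unsigned rshift = 64 - lshift;                 // mov(64, r); sub(r, left_shift, r)

      uint64_t a, b, c;                              // three aligned 8-byte loads
      memcpy(&a, p, 8);                              // ldx(data_ptr,  0, O4)
      memcpy(&b, p + 8, 8);                          // ldx(data_ptr,  8, O5)
      memcpy(&c, p + 16, 8);                         // ldx(data_ptr, 16, G4)

      w0 = (a << lshift) | (b >> rshift);            // sllx/srlx/bset pairs
      w1 = (b << lshift) | (c >> rshift);
    }
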
4914 
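The xmulx/xmulxhi sequence after L_main multiplies the updated state by the hash subkey H in GF(2^128) and reduces the product modulo the GHASH polynomial, whose reduction constant the stub materializes as 0xE1 << 56. For reference, here is a bit-serial sketch of the same field multiplication in the shift-and-reduce form of NIST SP 800-38D; it reproduces the math, not the stub's register schedule, and the u128/gf128_mul names are illustrative.

    #include <cstdint>

    struct u128 { uint64_t hi; uint64_t lo; };   // big-endian halves, like O0:O1

    // X * H in GF(2^128) with the GCM bit ordering; R = 0xE1 || 0^120 is the
    // reduction constant the stub builds with mov(0xE1, O0); sllx(O0, 56, O0).
    static u128 gf128_mul(u128 x, u128 h) {
      u128 z = {0, 0};
      u128 v = h;
      for (int i = 0; i < 128; i++) {
        // bit i of x, counted from the most significant bit
        uint64_t xi = (i < 64) ? (x.hi >> (63 - i)) & 1 : (x.lo >> (127 - i)) & 1;
        if (xi) { z.hi ^= v.hi; z.lo ^= v.lo; }
        uint64_t lsb = v.lo & 1;                 // bit about to be shifted out of v
        v.lo = (v.lo >> 1) | (v.hi << 63);
        v.hi >>= 1;
        if (lsb) v.hi ^= 0xE100000000000000ULL;  // conditional reduction by R
      }
      return z;
    }

    // One trip around L_ghash_loop per 16-byte block: state = (state ^ block) * H.
    // The real stub reads each block from raw, possibly misaligned bytes.
    static void ghash_process_blocks(u128& state, const u128& subkeyH,
                                     const u128* blocks, int nblocks) {
      for (int i = 0; i < nblocks; i++) {        // deccc(len); branch back while nonzero
        state.hi ^= blocks[i].hi;
        state.lo ^= blocks[i].lo;
        state = gf128_mul(state, subkeyH);
      }
    }
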
4915   void generate_initial() {
4916     // Generates all stubs and initializes the entry points
4917 
4918     //------------------------------------------------------------------------------------------------------------------------
4919     // entry points that exist in all platforms
4920     // Note: This is code that could be shared among different platforms - however the benefit seems to be smaller than
4921     //       the disadvantage of having a much more complicated generator structure. See also comment in stubRoutines.hpp.
4922     StubRoutines::_forward_exception_entry                 = generate_forward_exception();
4923 
4924     StubRoutines::_call_stub_entry                         = generate_call_stub(StubRoutines::_call_stub_return_address);
4925     StubRoutines::_catch_exception_entry                   = generate_catch_exception();
4926 
4927     //------------------------------------------------------------------------------------------------------------------------
4928     // entry points that are platform specific
4929     StubRoutines::Sparc::_test_stop_entry                  = generate_test_stop();
4930 
4931     StubRoutines::Sparc::_stop_subroutine_entry            = generate_stop_subroutine();
4932     StubRoutines::Sparc::_flush_callers_register_windows_entry = generate_flush_callers_register_windows();
4933 
4934 #if !defined(COMPILER2) && !defined(_LP64)


4967     // arraycopy stubs used by compilers
4968     generate_arraycopy_stubs();
4969 
4970     // Don't initialize the platform math functions since sparc
4971     // doesn't have intrinsics for these operations.
4972 
4973     // Safefetch stubs.
4974     generate_safefetch("SafeFetch32", sizeof(int),     &StubRoutines::_safefetch32_entry,
4975                                                        &StubRoutines::_safefetch32_fault_pc,
4976                                                        &StubRoutines::_safefetch32_continuation_pc);
4977     generate_safefetch("SafeFetchN", sizeof(intptr_t), &StubRoutines::_safefetchN_entry,
4978                                                        &StubRoutines::_safefetchN_fault_pc,
4979                                                        &StubRoutines::_safefetchN_continuation_pc);
4980 
4981     // generate AES intrinsics code
4982     if (UseAESIntrinsics) {
4983       StubRoutines::_aescrypt_encryptBlock = generate_aescrypt_encryptBlock();
4984       StubRoutines::_aescrypt_decryptBlock = generate_aescrypt_decryptBlock();
4985       StubRoutines::_cipherBlockChaining_encryptAESCrypt = generate_cipherBlockChaining_encryptAESCrypt();
4986       StubRoutines::_cipherBlockChaining_decryptAESCrypt = generate_cipherBlockChaining_decryptAESCrypt_Parallel();
4987     }
4988     // generate GHASH intrinsics code
4989     if (UseGHASHIntrinsics) {
4990       StubRoutines::_ghash_processBlocks = generate_ghash_processBlocks();
4991     }
4992 
4993     // generate SHA1/SHA256/SHA512 intrinsics code
4994     if (UseSHA1Intrinsics) {
4995       StubRoutines::_sha1_implCompress     = generate_sha1_implCompress(false,   "sha1_implCompress");
4996       StubRoutines::_sha1_implCompressMB   = generate_sha1_implCompress(true,    "sha1_implCompressMB");
4997     }
4998     if (UseSHA256Intrinsics) {
4999       StubRoutines::_sha256_implCompress   = generate_sha256_implCompress(false, "sha256_implCompress");
5000       StubRoutines::_sha256_implCompressMB = generate_sha256_implCompress(true,  "sha256_implCompressMB");
5001     }
5002     if (UseSHA512Intrinsics) {
5003       StubRoutines::_sha512_implCompress   = generate_sha512_implCompress(false, "sha512_implCompress");
5004       StubRoutines::_sha512_implCompressMB = generate_sha512_implCompress(true,  "sha512_implCompressMB");
5005     }
5006   }
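
Each Use*Intrinsics flag above controls whether the corresponding StubRoutines slot is filled; with the flag off, the slot stays NULL and no accelerated entry point exists. The sketch below is only a schematic of that pattern, with an illustrative signature for the GHASH entry reflecting the (state, subkeyH, data, block count) layout read from I0-I3 above; the actual consumer is the JIT's intrinsic for GHASH.processBlocks, not a runtime NULL check like this one.

    #include <cstddef>
    #include <cstdint>

    // Stand-in for StubRoutines::_ghash_processBlocks; NULL when no stub was generated.
    typedef void (*ghash_stub_t)(uint64_t* state, const uint64_t* subkeyH,
                                 const uint8_t* data, int blocks);
    static ghash_stub_t ghash_processBlocks_entry = NULL;

    static void ghash_process(uint64_t* state, const uint64_t* subkeyH,
                              const uint8_t* data, int blocks) {
      if (ghash_processBlocks_entry != NULL) {
        ghash_processBlocks_entry(state, subkeyH, data, blocks);  // accelerated path
      } else {
        // portable fallback (e.g. a software GF(2^128) multiply) would go here
      }
    }
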
5007 
5008 
5009  public:
5010   StubGenerator(CodeBuffer* code, bool all) : StubCodeGenerator(code) {

