src/cpu/x86/vm/macroAssembler_x86.hpp

--- src/cpu/x86/vm/macroAssembler_x86.hpp (old)

   1 /*
   2  * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *


 882   void addsd(XMMRegister dst, AddressLiteral src);
 883 
 884   void addss(XMMRegister dst, XMMRegister src)    { Assembler::addss(dst, src); }
 885   void addss(XMMRegister dst, Address src)        { Assembler::addss(dst, src); }
 886   void addss(XMMRegister dst, AddressLiteral src);
 887 
 888   void divsd(XMMRegister dst, XMMRegister src)    { Assembler::divsd(dst, src); }
 889   void divsd(XMMRegister dst, Address src)        { Assembler::divsd(dst, src); }
 890   void divsd(XMMRegister dst, AddressLiteral src);
 891 
 892   void divss(XMMRegister dst, XMMRegister src)    { Assembler::divss(dst, src); }
 893   void divss(XMMRegister dst, Address src)        { Assembler::divss(dst, src); }
 894   void divss(XMMRegister dst, AddressLiteral src);
 895 
 896   // Move Unaligned Double Quadword
 897   void movdqu(Address     dst, XMMRegister src)   { Assembler::movdqu(dst, src); }
 898   void movdqu(XMMRegister dst, Address src)       { Assembler::movdqu(dst, src); }
 899   void movdqu(XMMRegister dst, XMMRegister src)   { Assembler::movdqu(dst, src); }
 900   void movdqu(XMMRegister dst, AddressLiteral src);
 901 
 902   void movsd(XMMRegister dst, XMMRegister src) { Assembler::movsd(dst, src); }
 903   void movsd(Address dst, XMMRegister src)     { Assembler::movsd(dst, src); }
 904   void movsd(XMMRegister dst, Address src)     { Assembler::movsd(dst, src); }
 905   void movsd(XMMRegister dst, AddressLiteral src);
 906 
 907   void mulsd(XMMRegister dst, XMMRegister src)    { Assembler::mulsd(dst, src); }
 908   void mulsd(XMMRegister dst, Address src)        { Assembler::mulsd(dst, src); }
 909   void mulsd(XMMRegister dst, AddressLiteral src);
 910 
 911   void mulss(XMMRegister dst, XMMRegister src)    { Assembler::mulss(dst, src); }
 912   void mulss(XMMRegister dst, Address src)        { Assembler::mulss(dst, src); }
 913   void mulss(XMMRegister dst, AddressLiteral src);
 914 
 915   void sqrtsd(XMMRegister dst, XMMRegister src)    { Assembler::sqrtsd(dst, src); }
 916   void sqrtsd(XMMRegister dst, Address src)        { Assembler::sqrtsd(dst, src); }
 917   void sqrtsd(XMMRegister dst, AddressLiteral src);
 918 
 919   void sqrtss(XMMRegister dst, XMMRegister src)    { Assembler::sqrtss(dst, src); }
 920   void sqrtss(XMMRegister dst, Address src)        { Assembler::sqrtss(dst, src); }
 921   void sqrtss(XMMRegister dst, AddressLiteral src);


1010   }
1011   void vpxor(XMMRegister dst, XMMRegister nds, Address src, bool vector256) {
1012     if (UseAVX > 1 || !vector256) // 256-bit vpxor requires AVX2
1013       Assembler::vpxor(dst, nds, src, vector256);
1014     else                          // AVX1: fall back to vxorpd, which computes the same bits
1015       Assembler::vxorpd(dst, nds, src, vector256);
1016   }
1017 
1018   // Simple versions for AVX2 256-bit vectors
1019   void vpxor(XMMRegister dst, XMMRegister src) { Assembler::vpxor(dst, dst, src, true); }
1020   void vpxor(XMMRegister dst, Address src) { Assembler::vpxor(dst, dst, src, true); }
1021 
1022   // Move packed integer values from the low 128 bits to the high 128 bits of a 256-bit vector.
1023   void vinserti128h(XMMRegister dst, XMMRegister nds, XMMRegister src) {
1024     if (UseAVX > 1) // vinserti128h is available only in AVX2
1025       Assembler::vinserti128h(dst, nds, src);
1026     else
1027       Assembler::vinsertf128h(dst, nds, src);
1028   }
1029 
1030   // Data
1031 
1032   void cmov32( Condition cc, Register dst, Address  src);
1033   void cmov32( Condition cc, Register dst, Register src);
1034 
1035   void cmov(   Condition cc, Register dst, Register src) { cmovptr(cc, dst, src); }
1036 
1037   void cmovptr(Condition cc, Register dst, Address  src) { LP64_ONLY(cmovq(cc, dst, src)) NOT_LP64(cmov32(cc, dst, src)); }
1038   void cmovptr(Condition cc, Register dst, Register src) { LP64_ONLY(cmovq(cc, dst, src)) NOT_LP64(cmov32(cc, dst, src)); }
1039 
1040   void movoop(Register dst, jobject obj);
1041   void movoop(Address dst, jobject obj);
1042 
1043   void mov_metadata(Register dst, Metadata* obj);
1044   void mov_metadata(Address dst, Metadata* obj);
1045 
1046   void movptr(ArrayAddress dst, Register src);
1047   // can this do an lea?
1048   void movptr(Register dst, ArrayAddress src);
1049 


1126 
1127   // Compare strings.
1128   void string_compare(Register str1, Register str2,
1129                       Register cnt1, Register cnt2, Register result,
1130                       XMMRegister vec1);
1131 
1132   // Compare char[] arrays.
1133   void char_arrays_equals(bool is_array_equ, Register ary1, Register ary2,
1134                           Register limit, Register result, Register chr,
1135                           XMMRegister vec1, XMMRegister vec2);
1136 
1137   // Fill primitive arrays
1138   void generate_fill(BasicType t, bool aligned,
1139                      Register to, Register value, Register count,
1140                      Register rtmp, XMMRegister xtmp);
1141 
1142   void encode_iso_array(Register src, Register dst, Register len,
1143                         XMMRegister tmp1, XMMRegister tmp2, XMMRegister tmp3,
1144                         XMMRegister tmp4, Register tmp5, Register result);
1145 
1146 #undef VIRTUAL
1147 
1148 };
1149 
1150 /**
1151  * class SkipIfEqual:
1152  *
1153  * Instantiating this class emits assembly code that jumps around any code
1154  * emitted between the creation of the instance and its automatic destruction
1155  * at the end of the enclosing scope, depending on the run-time value of the
1156  * flag passed to the constructor.
1157  */
1158 class SkipIfEqual {
1159  private:
1160   MacroAssembler* _masm;
1161   Label _label;
1162 
1163  public:
1164    SkipIfEqual(MacroAssembler*, const bool* flag_addr, bool value);
1165    ~SkipIfEqual();

+++ src/cpu/x86/vm/macroAssembler_x86.hpp (new)

   1 /*
   2  * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *


 882   void addsd(XMMRegister dst, AddressLiteral src);
 883 
 884   void addss(XMMRegister dst, XMMRegister src)    { Assembler::addss(dst, src); }
 885   void addss(XMMRegister dst, Address src)        { Assembler::addss(dst, src); }
 886   void addss(XMMRegister dst, AddressLiteral src);
 887 
 888   void divsd(XMMRegister dst, XMMRegister src)    { Assembler::divsd(dst, src); }
 889   void divsd(XMMRegister dst, Address src)        { Assembler::divsd(dst, src); }
 890   void divsd(XMMRegister dst, AddressLiteral src);
 891 
 892   void divss(XMMRegister dst, XMMRegister src)    { Assembler::divss(dst, src); }
 893   void divss(XMMRegister dst, Address src)        { Assembler::divss(dst, src); }
 894   void divss(XMMRegister dst, AddressLiteral src);
 895 
 896   // Move Unaligned Double Quadword
 897   void movdqu(Address     dst, XMMRegister src)   { Assembler::movdqu(dst, src); }
 898   void movdqu(XMMRegister dst, Address src)       { Assembler::movdqu(dst, src); }
 899   void movdqu(XMMRegister dst, XMMRegister src)   { Assembler::movdqu(dst, src); }
 900   void movdqu(XMMRegister dst, AddressLiteral src);
 901 
 902   // Move Aligned Double Quadword (memory operands must be 16-byte aligned)
 903   void movdqa(XMMRegister dst, Address src)       { Assembler::movdqa(dst, src); }
 904   void movdqa(XMMRegister dst, XMMRegister src)   { Assembler::movdqa(dst, src); }
 905   void movdqa(XMMRegister dst, AddressLiteral src);
 906 
 907   void movsd(XMMRegister dst, XMMRegister src) { Assembler::movsd(dst, src); }
 908   void movsd(Address dst, XMMRegister src)     { Assembler::movsd(dst, src); }
 909   void movsd(XMMRegister dst, Address src)     { Assembler::movsd(dst, src); }
 910   void movsd(XMMRegister dst, AddressLiteral src);
 911 
 912   void mulsd(XMMRegister dst, XMMRegister src)    { Assembler::mulsd(dst, src); }
 913   void mulsd(XMMRegister dst, Address src)        { Assembler::mulsd(dst, src); }
 914   void mulsd(XMMRegister dst, AddressLiteral src);
 915 
 916   void mulss(XMMRegister dst, XMMRegister src)    { Assembler::mulss(dst, src); }
 917   void mulss(XMMRegister dst, Address src)        { Assembler::mulss(dst, src); }
 918   void mulss(XMMRegister dst, AddressLiteral src);
 919 
 920   void sqrtsd(XMMRegister dst, XMMRegister src)    { Assembler::sqrtsd(dst, src); }
 921   void sqrtsd(XMMRegister dst, Address src)        { Assembler::sqrtsd(dst, src); }
 922   void sqrtsd(XMMRegister dst, AddressLiteral src);
 923 
 924   void sqrtss(XMMRegister dst, XMMRegister src)    { Assembler::sqrtss(dst, src); }
 925   void sqrtss(XMMRegister dst, Address src)        { Assembler::sqrtss(dst, src); }
 926   void sqrtss(XMMRegister dst, AddressLiteral src);


1015   }
1016   void vpxor(XMMRegister dst, XMMRegister nds, Address src, bool vector256) {
1017     if (UseAVX > 1 || !vector256) // 256-bit vpxor requires AVX2
1018       Assembler::vpxor(dst, nds, src, vector256);
1019     else                          // AVX1: fall back to vxorpd, which computes the same bits
1020       Assembler::vxorpd(dst, nds, src, vector256);
1021   }
1022 
1023   // Simple versions for AVX2 256-bit vectors
1024   void vpxor(XMMRegister dst, XMMRegister src) { Assembler::vpxor(dst, dst, src, true); }
1025   void vpxor(XMMRegister dst, Address src) { Assembler::vpxor(dst, dst, src, true); }
1026 
1027   // Move packed integer values from the low 128 bits to the high 128 bits of a 256-bit vector.
1028   void vinserti128h(XMMRegister dst, XMMRegister nds, XMMRegister src) {
1029     if (UseAVX > 1) // vinserti128h is available only in AVX2
1030       Assembler::vinserti128h(dst, nds, src);
1031     else
1032       Assembler::vinsertf128h(dst, nds, src);
1033   }
1034 
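A note on the fallbacks above: XOR and the 128-bit insert are pure bit
operations, so the AVX1 floating-point forms (vxorpd, vinsertf128h) produce
exactly the same bits as the AVX2 integer forms; only the execution domain
differs. A scalar sketch of that equivalence, for illustration only:

    #include <stdint.h>
    #include <string.h>

    // XOR-ing the bit patterns of two doubles (what vxorpd does per lane)
    // gives the same bits as an integer XOR of those lanes (vpxor).
    static uint64_t xor_double_bits(double a, double b) {
      uint64_t ia, ib;
      memcpy(&ia, &a, sizeof ia);  // reinterpret without aliasing UB
      memcpy(&ib, &b, sizeof ib);
      return ia ^ ib;              // identical regardless of int/FP view
    }
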
1035   // Carry-Less Multiplication Quadword
1036   void vpclmulldq(XMMRegister dst, XMMRegister nds, XMMRegister src) {
1037     // 0x00 - multiply lower 64 bits [0:63]
1038     Assembler::vpclmulqdq(dst, nds, src, 0x00);
1039   }
1040   void vpclmulhdq(XMMRegister dst, XMMRegister nds, XMMRegister src) {
1041     // 0x11 - multiply upper 64 bits [64:127]
1042     Assembler::vpclmulqdq(dst, nds, src, 0x11);
1043   }
1044 
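The 0x00 and 0x11 immediates select which 64-bit half of each 128-bit
operand vpclmulqdq multiplies (low x low and high x high, respectively).
A scalar sketch of one such carry-less multiply, where partial products are
combined with XOR instead of ADD; for illustration only, not part of this
file:

    #include <stdint.h>

    // Carry-less (GF(2)) multiply of two 64-bit values, yielding the
    // 128-bit product that one PCLMULQDQ quadword multiply computes.
    static void clmul64(uint64_t a, uint64_t b, uint64_t* lo, uint64_t* hi) {
      uint64_t l = 0, h = 0;
      for (int i = 0; i < 64; i++) {
        if ((b >> i) & 1) {
          l ^= a << i;
          if (i != 0) h ^= a >> (64 - i);  // i == 0 contributes nothing to hi
        }
      }
      *lo = l;
      *hi = h;
    }
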
1045   // Data
1046 
1047   void cmov32( Condition cc, Register dst, Address  src);
1048   void cmov32( Condition cc, Register dst, Register src);
1049 
1050   void cmov(   Condition cc, Register dst, Register src) { cmovptr(cc, dst, src); }
1051 
1052   void cmovptr(Condition cc, Register dst, Address  src) { LP64_ONLY(cmovq(cc, dst, src)) NOT_LP64(cmov32(cc, dst, src)); }
1053   void cmovptr(Condition cc, Register dst, Register src) { LP64_ONLY(cmovq(cc, dst, src)) NOT_LP64(cmov32(cc, dst, src)); }
1054 
1055   void movoop(Register dst, jobject obj);
1056   void movoop(Address dst, jobject obj);
1057 
1058   void mov_metadata(Register dst, Metadata* obj);
1059   void mov_metadata(Address dst, Metadata* obj);
1060 
1061   void movptr(ArrayAddress dst, Register src);
1062   // can this do an lea?
1063   void movptr(Register dst, ArrayAddress src);
1064 


1141 
1142   // Compare strings.
1143   void string_compare(Register str1, Register str2,
1144                       Register cnt1, Register cnt2, Register result,
1145                       XMMRegister vec1);
1146 
1147   // Compare char[] arrays.
1148   void char_arrays_equals(bool is_array_equ, Register ary1, Register ary2,
1149                           Register limit, Register result, Register chr,
1150                           XMMRegister vec1, XMMRegister vec2);
1151 
1152   // Fill primitive arrays
1153   void generate_fill(BasicType t, bool aligned,
1154                      Register to, Register value, Register count,
1155                      Register rtmp, XMMRegister xtmp);
1156 
1157   void encode_iso_array(Register src, Register dst, Register len,
1158                         XMMRegister tmp1, XMMRegister tmp2, XMMRegister tmp3,
1159                         XMMRegister tmp4, Register tmp5, Register result);
1160 
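A scalar model of what encode_iso_array computes, assuming the usual
contract of this intrinsic (the real code processes many chars per SSE/AVX
iteration; the function name here is illustrative):

    #include <stdint.h>

    // Compress UTF-16 chars to ISO-8859-1 bytes: copy chars that fit in one
    // byte, stop at the first char > 0xFF, return how many were encoded.
    static int encode_iso(const uint16_t* src, uint8_t* dst, int len) {
      int i = 0;
      for (; i < len; i++) {
        if (src[i] > 0xFF) break;   // not representable in ISO-8859-1
        dst[i] = (uint8_t)src[i];
      }
      return i;
    }
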
1161   // CRC32 code for the java.util.zip.CRC32::updateBytes() intrinsic.
1162   void update_byte_crc32(Register crc, Register val, Register table);
1163   void kernel_crc32(Register crc, Register buf, Register len, Register table, Register tmp);
1164   // Fold 128-bit data chunk
1165   void fold_128bit_crc32(XMMRegister xcrc, XMMRegister xK, XMMRegister xtmp, Register buf, int offset);
1166   void fold_128bit_crc32(XMMRegister xcrc, XMMRegister xK, XMMRegister xtmp, XMMRegister xbuf);
1167   // Fold 8-bit data
1168   void fold_8bit_crc32(Register crc, Register table, Register tmp);
1169   void fold_8bit_crc32(XMMRegister crc, Register table, XMMRegister xtmp, Register tmp);
1170 
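update_byte_crc32 corresponds to the classic table-driven byte step of the
reflected CRC-32 used by java.util.zip.CRC32 (polynomial 0xEDB88320), while
kernel_crc32 folds the buffer 128 bits at a time using the carry-less
multiplies declared above. A minimal sketch of the byte step, for reference
only:

    #include <stdint.h>

    static uint32_t crc_table[256];

    // Build the standard reflected CRC-32 table (polynomial 0xEDB88320).
    static void crc32_init() {
      for (uint32_t i = 0; i < 256; i++) {
        uint32_t c = i;
        for (int k = 0; k < 8; k++)
          c = (c & 1) ? (c >> 1) ^ 0xEDB88320u : (c >> 1);
        crc_table[i] = c;
      }
    }

    // One byte of CRC update, the scalar operation update_byte_crc32 emits.
    // Callers start with crc = 0xFFFFFFFF and finish by inverting the result.
    static uint32_t update_byte(uint32_t crc, uint8_t val) {
      return (crc >> 8) ^ crc_table[(crc ^ val) & 0xFF];
    }
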
1171 #undef VIRTUAL
1172 
1173 };
1174 
1175 /**
1176  * class SkipIfEqual:
1177  *
1178  * Instantiating this class emits assembly code that jumps around any code
1179  * emitted between the creation of the instance and its automatic destruction
1180  * at the end of the enclosing scope, depending on the run-time value of the
1181  * flag passed to the constructor.
1182  */
1183 class SkipIfEqual {
1184  private:
1185   MacroAssembler* _masm;
1186   Label _label;
1187 
1188  public:
1189    SkipIfEqual(MacroAssembler*, const bool* flag_addr, bool value);
1190    ~SkipIfEqual();
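
A hypothetical usage sketch (the flag name and entry point are illustrative,
not from this file): as the class name indicates, everything emitted inside
the scope is jumped over at run time when the byte at flag_addr equals the
given value.

    // Hypothetical flag and call target, for illustration only.
    extern bool SomeDiagnosticFlag;
    extern address probe_entry_point;

    {
      SkipIfEqual skip(masm, &SomeDiagnosticFlag, false);
      // Emitted code here is skipped at run time when
      // SomeDiagnosticFlag == false, so the call runs only
      // when the flag is true.
      masm->call(RuntimeAddress(probe_entry_point));
    }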