src/cpu/x86/vm/macroAssembler_x86.hpp
*** old/src/cpu/x86/vm/macroAssembler_x86.hpp	Fri Jun 28 08:59:02 2013
--- new/src/cpu/x86/vm/macroAssembler_x86.hpp	Fri Jun 28 08:59:02 2013

*** 1,7 ****
--- 1,7 ----
  /*
! * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
! * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License version 2 only, as
  * published by the Free Software Foundation.
*** 897,906 ****
--- 897,911 ----
  void movdqu(Address dst, XMMRegister src) { Assembler::movdqu(dst, src); }
  void movdqu(XMMRegister dst, Address src) { Assembler::movdqu(dst, src); }
  void movdqu(XMMRegister dst, XMMRegister src) { Assembler::movdqu(dst, src); }
  void movdqu(XMMRegister dst, AddressLiteral src);
+ // Move Aligned Double Quadword
+ void movdqa(XMMRegister dst, Address src) { Assembler::movdqa(dst, src); }
+ void movdqa(XMMRegister dst, XMMRegister src) { Assembler::movdqa(dst, src); }
+ void movdqa(XMMRegister dst, AddressLiteral src);
+
  void movsd(XMMRegister dst, XMMRegister src) { Assembler::movsd(dst, src); }
  void movsd(Address dst, XMMRegister src) { Assembler::movsd(dst, src); }
  void movsd(XMMRegister dst, Address src) { Assembler::movsd(dst, src); }
  void movsd(XMMRegister dst, AddressLiteral src);
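For reference, movdqa differs from movdqu in that its memory operand must be 16-byte aligned (an unaligned access faults), which makes it suitable for loading aligned constant tables. The hunk above only adds the declarations; the sketch below shows what the .cpp side of the new AddressLiteral overload could look like, based on the reachable/lea fallback pattern already used by movdqu(XMMRegister, AddressLiteral). It is an assumption for illustration, not part of this webrev.

    // Hypothetical sketch (not in this webrev): AddressLiteral variant of movdqa,
    // mirroring the reachable/lea fallback used by other AddressLiteral loads.
    void MacroAssembler::movdqa(XMMRegister dst, AddressLiteral src) {
      if (reachable(src)) {
        Assembler::movdqa(dst, as_Address(src));       // direct (RIP-relative) load
      } else {
        lea(rscratch1, src);                           // materialize the address first
        Assembler::movdqa(dst, Address(rscratch1, 0));
      }
    }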
*** 1025,1034 ****
--- 1030,1049 ----
        Assembler::vinserti128h(dst, nds, src);
      else
        Assembler::vinsertf128h(dst, nds, src);
  }
+ // Carry-Less Multiplication Quadword
+ void vpclmulldq(XMMRegister dst, XMMRegister nds, XMMRegister src) {
+   // 0x00 - multiply lower 64 bits [0:63]
+   Assembler::vpclmulqdq(dst, nds, src, 0x00);
+ }
+ void vpclmulhdq(XMMRegister dst, XMMRegister nds, XMMRegister src) {
+   // 0x11 - multiply upper 64 bits [64:127]
+   Assembler::vpclmulqdq(dst, nds, src, 0x11);
+ }
+
  // Data
  void cmov32( Condition cc, Register dst, Address src);
  void cmov32( Condition cc, Register dst, Register src);
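To make the immediate byte concrete: bits 0 and 4 of imm8 select the low or high quadword of each 128-bit source, so 0x00 multiplies the two low halves and 0x11 the two high halves, and the product is carry-less (polynomial multiplication over GF(2)). The scalar model below illustrates the 64x64 -> 128-bit operation; it is not HotSpot code, just a reference implementation of what one PCLMULQDQ step computes.

    #include <cstdint>
    #include <utility>

    // Illustrative model of a 64x64 -> 128-bit carry-less multiply: partial
    // products are combined with XOR instead of ADD, i.e. multiplication of
    // polynomials over GF(2). PCLMULQDQ does this for the selected quadwords.
    static std::pair<uint64_t, uint64_t> clmul64(uint64_t a, uint64_t b) {
      uint64_t lo = 0, hi = 0;
      for (int i = 0; i < 64; i++) {
        if ((b >> i) & 1) {
          lo ^= a << i;                          // bits that stay in the low half
          if (i != 0) hi ^= a >> (64 - i);       // bits that spill into the high half
        }
      }
      return std::make_pair(hi, lo);             // {upper 64 bits, lower 64 bits}
    }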
*** 1141,1150 ****
--- 1156,1175 ----
  void encode_iso_array(Register src, Register dst, Register len,
                        XMMRegister tmp1, XMMRegister tmp2, XMMRegister tmp3,
                        XMMRegister tmp4, Register tmp5, Register result);
+ // CRC32 code for java.util.zip.CRC32::updateBytes() intrinsic.
+ void update_byte_crc32(Register crc, Register val, Register table);
+ void kernel_crc32(Register crc, Register buf, Register len, Register table, Register tmp);
+ // Fold 128-bit data chunk
+ void fold_128bit_crc32(XMMRegister xcrc, XMMRegister xK, XMMRegister xtmp, Register buf, int offset);
+ void fold_128bit_crc32(XMMRegister xcrc, XMMRegister xK, XMMRegister xtmp, XMMRegister xbuf);
+ // Fold 8-bit data
+ void fold_8bit_crc32(Register crc, Register table, Register tmp);
+ void fold_8bit_crc32(XMMRegister crc, Register table, XMMRegister xtmp, Register tmp);
+
  #undef VIRTUAL
  };

  /**
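These helpers split the CRC32 intrinsic into two tiers: kernel_crc32 folds 128-bit chunks of the buffer with the carry-less multiply wrappers above against precomputed constants (the xK argument), while update_byte_crc32 and fold_8bit_crc32 finish the remaining tail a byte at a time from a 256-entry lookup table. The scalar sketch below shows that per-byte update, assuming the standard reflected CRC-32 table (polynomial 0xEDB88320) used by java.util.zip.CRC32; it illustrates the algorithm the assembly helpers are built around and is not code from this change.

    #include <cstdint>
    #include <cstddef>

    // Scalar sketch of the byte-at-a-time CRC-32 update used for the buffer tail.
    // 'table' is the usual 256-entry reflected CRC-32 table; the pre/post
    // inversion matches the java.util.zip.CRC32 contract.
    static uint32_t crc32_bytes(uint32_t crc, const uint8_t* buf, size_t len,
                                const uint32_t table[256]) {
      crc = ~crc;                                        // work on the inverted CRC
      for (size_t i = 0; i < len; i++) {
        crc = table[(crc ^ buf[i]) & 0xFFu] ^ (crc >> 8);
      }
      return ~crc;
    }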
