src/cpu/x86/vm/sharedRuntime_x86_64.cpp
*** old/src/cpu/x86/vm/sharedRuntime_x86_64.cpp	Tue Mar  1 16:13:30 2016
--- new/src/cpu/x86/vm/sharedRuntime_x86_64.cpp	Tue Mar  1 16:13:30 2016

*** 177,193 **** --- 177,193 ----
    // push cpu state handles this on EVEX enabled targets
    if (save_vectors) {
      // Save upper half of YMM registers(0..15)
      int base_addr = XSAVE_AREA_YMM_BEGIN;
      for (int n = 0; n < 16; n++) {
!       __ vextractf128h(Address(rsp, base_addr+n*16), as_XMMRegister(n));
!       __ vextractf128(Address(rsp, base_addr+n*16), as_XMMRegister(n), 1);
      }
      if (VM_Version::supports_evex()) {
        // Save upper half of ZMM registers(0..15)
        base_addr = XSAVE_AREA_ZMM_BEGIN;
        for (int n = 0; n < 16; n++) {
-         __ vextractf64x4h(Address(rsp, base_addr+n*32), as_XMMRegister(n), 1);
        }
        // Save full ZMM registers(16..num_xmm_regs)
        base_addr = XSAVE_AREA_UPPERBANK;
        off = 0;
        int vector_len = Assembler::AVX_512bit;
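Note on the save-path hunk above: the implicit-upper-half helper vextractf128h(dst, src) is replaced by the generic vextractf128(dst, src, imm8), where the trailing immediate selects the 128-bit lane (1 = bits 255:128 of the YMM register). The explicit vextractf64x4h call in the ZMM loop is dropped, consistent with the hunk's leading comment that push cpu state handles this on EVEX enabled targets. A minimal sketch of the resulting YMM save loop, assuming only the Assembler overload shown in this diff (the loop body is taken from the hunk; the surrounding names are the diff's own):

    // Save the upper 128 bits of ymm0..ymm15 to the XSAVE area on the stack.
    // imm8 = 1 selects the upper lane; each slot is 16 bytes wide.
    int base_addr = XSAVE_AREA_YMM_BEGIN;
    for (int n = 0; n < 16; n++) {
      __ vextractf128(Address(rsp, base_addr + n*16), as_XMMRegister(n), 1);
    }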
*** 331,347 **** --- 331,347 ----
    // On EVEX enabled targets everything is handled in pop fpu state
    if (restore_vectors) {
      // Restore upper half of YMM registers (0..15)
      int base_addr = XSAVE_AREA_YMM_BEGIN;
      for (int n = 0; n < 16; n++) {
!       __ vinsertf128h(as_XMMRegister(n), Address(rsp, base_addr+n*16));
!       __ vinsertf128(as_XMMRegister(n), as_XMMRegister(n), Address(rsp, base_addr+n*16), 1);
      }
      if (VM_Version::supports_evex()) {
        // Restore upper half of ZMM registers (0..15)
        base_addr = XSAVE_AREA_ZMM_BEGIN;
        for (int n = 0; n < 16; n++) {
!         __ vinsertf64x4h(as_XMMRegister(n), Address(rsp, base_addr+n*32), 1);
!         __ vinsertf64x4(as_XMMRegister(n), as_XMMRegister(n), Address(rsp, base_addr+n*32), 1);
        }
        // Restore full ZMM registers(16..num_xmm_regs)
        base_addr = XSAVE_AREA_UPPERBANK;
        int vector_len = Assembler::AVX_512bit;
        int off = 0;
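The restore-path hunk mirrors the save path: vinsertf128h(dst, src) becomes vinsertf128(dst, nds, src, imm8), and vinsertf64x4h becomes vinsertf64x4, each now passing the destination register again as the nds operand so the lower lane is carried over unchanged while only the saved upper lane is reloaded from the stack slot. A minimal sketch of the new restore loops, condensed from the hunk above and assuming only the overloads it shows:

    // Restore the upper 128 bits of ymm0..ymm15; lane 0 is taken from nds
    // (the register itself), so only bits 255:128 are overwritten.
    int base_addr = XSAVE_AREA_YMM_BEGIN;
    for (int n = 0; n < 16; n++) {
      __ vinsertf128(as_XMMRegister(n), as_XMMRegister(n), Address(rsp, base_addr + n*16), 1);
    }
    if (VM_Version::supports_evex()) {
      // Restore the upper 256 bits of zmm0..zmm15; imm8 = 1 selects
      // bits 511:256, bits 255:0 come from nds.
      base_addr = XSAVE_AREA_ZMM_BEGIN;
      for (int n = 0; n < 16; n++) {
        __ vinsertf64x4(as_XMMRegister(n), as_XMMRegister(n), Address(rsp, base_addr + n*32), 1);
      }
    }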
