src/cpu/x86/vm/sharedRuntime_x86_64.cpp
*** old/src/cpu/x86/vm/sharedRuntime_x86_64.cpp	Mon Mar  7 11:24:31 2016
--- new/src/cpu/x86/vm/sharedRuntime_x86_64.cpp	Mon Mar  7 11:24:31 2016

*** 177,193 ****
    // push cpu state handles this on EVEX enabled targets
    if (save_vectors) {
      // Save upper half of YMM registers(0..15)
      int base_addr = XSAVE_AREA_YMM_BEGIN;
      for (int n = 0; n < 16; n++) {
!       __ vextractf128h(Address(rsp, base_addr+n*16), as_XMMRegister(n));
      }
      if (VM_Version::supports_evex()) {
        // Save upper half of ZMM registers(0..15)
        base_addr = XSAVE_AREA_ZMM_BEGIN;
        for (int n = 0; n < 16; n++) {
!         __ vextractf64x4h(Address(rsp, base_addr+n*32), as_XMMRegister(n), 1);
        }
        // Save full ZMM registers(16..num_xmm_regs)
        base_addr = XSAVE_AREA_UPPERBANK;
        off = 0;
        int vector_len = Assembler::AVX_512bit;
--- 177,193 ----
    // push cpu state handles this on EVEX enabled targets
    if (save_vectors) {
      // Save upper half of YMM registers(0..15)
      int base_addr = XSAVE_AREA_YMM_BEGIN;
      for (int n = 0; n < 16; n++) {
!       __ vextractf128_high(Address(rsp, base_addr+n*16), as_XMMRegister(n));
      }
      if (VM_Version::supports_evex()) {
        // Save upper half of ZMM registers(0..15)
        base_addr = XSAVE_AREA_ZMM_BEGIN;
        for (int n = 0; n < 16; n++) {
!         __ vextractf64x4_high(Address(rsp, base_addr+n*32), as_XMMRegister(n));
        }
        // Save full ZMM registers(16..num_xmm_regs)
        base_addr = XSAVE_AREA_UPPERBANK;
        off = 0;
        int vector_len = Assembler::AVX_512bit;
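This hunk renames the save-side MacroAssembler helpers: vextractf128h becomes vextractf128_high, and vextractf64x4h becomes vextractf64x4_high, with the upper-lane immediate (the trailing 1) folded into the new name rather than passed at each call site. To make the operation concrete, here is a standalone illustration using the standard AVX compiler intrinsics; it is an analogue of what the emitted vextractf128/vinsertf128 instructions do, not HotSpot code (compile with -mavx):

    // Illustration only: the same "spill the upper 128 bits of a YMM
    // register, then put them back" pattern the diff implements via
    // MacroAssembler, expressed with standard AVX intrinsics.
    #include <immintrin.h>
    #include <stdio.h>

    int main() {
      alignas(32) float data[8] = {0, 1, 2, 3, 4, 5, 6, 7};
      __m256 ymm = _mm256_load_ps(data);          // full 256-bit register
      __m128 hi  = _mm256_extractf128_ps(ymm, 1); // imm8=1 selects bits 255:128
      alignas(16) float saved[4];
      _mm_store_ps(saved, hi);                    // spill upper half to memory
      printf("%g %g %g %g\n", saved[0], saved[1], saved[2], saved[3]); // 4 5 6 7
      // Restore path: reinsert the saved upper half into the YMM register.
      __m256 restored = _mm256_insertf128_ps(ymm, _mm_load_ps(saved), 1);
      (void)restored;
      return 0;
    }

The immediate 1 names the upper lane in both directions, which is exactly what the _high suffix now encodes for the HotSpot helpers.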
*** 331,347 ****
    // On EVEX enabled targets everything is handled in pop fpu state
    if (restore_vectors) {
      // Restore upper half of YMM registers (0..15)
      int base_addr = XSAVE_AREA_YMM_BEGIN;
      for (int n = 0; n < 16; n++) {
!       __ vinsertf128h(as_XMMRegister(n), Address(rsp, base_addr+n*16));
      }
      if (VM_Version::supports_evex()) {
        // Restore upper half of ZMM registers (0..15)
        base_addr = XSAVE_AREA_ZMM_BEGIN;
        for (int n = 0; n < 16; n++) {
!         __ vinsertf64x4h(as_XMMRegister(n), Address(rsp, base_addr+n*32), 1);
        }
        // Restore full ZMM registers(16..num_xmm_regs)
        base_addr = XSAVE_AREA_UPPERBANK;
        int vector_len = Assembler::AVX_512bit;
        int off = 0;
--- 331,347 ----
    // On EVEX enabled targets everything is handled in pop fpu state
    if (restore_vectors) {
      // Restore upper half of YMM registers (0..15)
      int base_addr = XSAVE_AREA_YMM_BEGIN;
      for (int n = 0; n < 16; n++) {
!       __ vinsertf128_high(as_XMMRegister(n), Address(rsp, base_addr+n*16));
      }
      if (VM_Version::supports_evex()) {
        // Restore upper half of ZMM registers (0..15)
        base_addr = XSAVE_AREA_ZMM_BEGIN;
        for (int n = 0; n < 16; n++) {
!         __ vinsertf64x4_high(as_XMMRegister(n), Address(rsp, base_addr+n*32));
        }
        // Restore full ZMM registers(16..num_xmm_regs)
        base_addr = XSAVE_AREA_UPPERBANK;
        int vector_len = Assembler::AVX_512bit;
        int off = 0;
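The restore side mirrors the save side: vinsertf128h becomes vinsertf128_high, and vinsertf64x4h becomes vinsertf64x4_high, again dropping the explicit upper-lane immediate from the vinsertf64x4 call. The EVEX branch works on the upper 256 bits of each ZMM register; a standalone AVX-512F analogue of that save/restore pair, using standard intrinsics rather than HotSpot's MacroAssembler, looks like this (compile with -mavx512f):

    // Illustration only: vextractf64x4 with imm8=1 spills the upper 256 bits
    // of a ZMM register; vinsertf64x4 with imm8=1 puts them back.
    #include <immintrin.h>
    #include <stdio.h>

    int main() {
      alignas(64) double data[8] = {0, 1, 2, 3, 4, 5, 6, 7};
      __m512d zmm = _mm512_load_pd(data);           // full 512-bit register
      __m256d hi  = _mm512_extractf64x4_pd(zmm, 1); // imm8=1 selects bits 511:256
      alignas(32) double saved[4];
      _mm256_store_pd(saved, hi);                   // spill upper half to memory
      // Restore path, mirroring vinsertf64x4_high:
      __m512d restored = _mm512_insertf64x4(zmm, _mm256_load_pd(saved), 1);
      alignas(64) double out[8];
      _mm512_store_pd(out, restored);
      printf("%g %g\n", out[4], out[7]);            // prints 4 7
      return 0;
    }

Since both renamed helpers only ever target the upper lane in this code, baking the immediate into the _high name removes a constant argument from every call site and makes the intent readable at a glance.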
