src/cpu/x86/vm/sharedRuntime_x86_32.cpp
Index Unified diffs Context diffs Sdiffs Wdiffs Patch New Old Previous File Next File
*** old/src/cpu/x86/vm/sharedRuntime_x86_32.cpp	Fri May  8 11:59:21 2015
--- new/src/cpu/x86/vm/sharedRuntime_x86_32.cpp	Fri May  8 11:59:21 2015

*** 115,127 **** --- 115,127 ---- OopMap* RegisterSaver::save_live_registers(MacroAssembler* masm, int additional_frame_words, int* total_frame_words, bool verify_fpu, bool save_vectors) { int vect_words = 0; #ifdef COMPILER2 if (save_vectors) { ! assert(UseAVX > 0, "256bit vectors are supported only with AVX"); ! assert(MaxVectorSize == 32, "only 256bit vectors are supported now"); ! // Save upper half of YMM registers ! assert(UseAVX > 0, "512bit vectors are supported only with EVEX"); ! assert(MaxVectorSize == 64, "only 512bit vectors are supported now"); ! // Save upper half of ZMM/YMM registers vect_words = 8 * 16 / wordSize; additional_frame_words += vect_words; } #else assert(!save_vectors, "vectors are generated only by C2");
*** 214,223 **** --- 214,234 ---- __ vextractf128h(Address(rsp, 48),xmm3); __ vextractf128h(Address(rsp, 64),xmm4); __ vextractf128h(Address(rsp, 80),xmm5); __ vextractf128h(Address(rsp, 96),xmm6); __ vextractf128h(Address(rsp,112),xmm7); + if (UseAVX > 2) { + __ subptr(rsp, 256); // Save upper half of ZMM registers + __ vextractf64x4h(Address(rsp, 0), xmm0); + __ vextractf64x4h(Address(rsp, 32), xmm1); + __ vextractf64x4h(Address(rsp, 64), xmm2); + __ vextractf64x4h(Address(rsp, 96), xmm3); + __ vextractf64x4h(Address(rsp, 128), xmm4); + __ vextractf64x4h(Address(rsp, 160), xmm5); + __ vextractf64x4h(Address(rsp, 192), xmm6); + __ vextractf64x4h(Address(rsp, 224), xmm7); + } } // Set an oopmap for the call site. This oopmap will map all // oop-registers and debug-info registers as callee-saved. This // will allow deoptimization at this safepoint to find all possible
*** 281,292 **** --- 292,303 ---- void RegisterSaver::restore_live_registers(MacroAssembler* masm, bool restore_vectors) { // Recover XMM & FPU state int additional_frame_bytes = 0; #ifdef COMPILER2 if (restore_vectors) { ! assert(UseAVX > 0, "256bit vectors are supported only with AVX"); ! assert(MaxVectorSize == 32, "only 256bit vectors are supported now"); ! assert(UseAVX > 0, "512bit vectors are supported only with EVEX"); ! assert(MaxVectorSize == 64, "only 512bit vectors are supported now"); additional_frame_bytes = 128; } #else assert(!restore_vectors, "vectors are generated only by C2"); #endif
*** 322,331 **** --- 333,354 ---- __ vinsertf128h(xmm4, Address(rsp, 64)); __ vinsertf128h(xmm5, Address(rsp, 80)); __ vinsertf128h(xmm6, Address(rsp, 96)); __ vinsertf128h(xmm7, Address(rsp,112)); __ addptr(rsp, additional_frame_bytes); + if (UseAVX > 2) { + additional_frame_bytes = 256; + __ vinsertf64x4h(xmm0, Address(rsp, 0)); + __ vinsertf64x4h(xmm1, Address(rsp, 32)); + __ vinsertf64x4h(xmm2, Address(rsp, 64)); + __ vinsertf64x4h(xmm3, Address(rsp, 96)); + __ vinsertf64x4h(xmm4, Address(rsp, 128)); + __ vinsertf64x4h(xmm5, Address(rsp, 160)); + __ vinsertf64x4h(xmm6, Address(rsp, 192)); + __ vinsertf64x4h(xmm7, Address(rsp, 224)); + __ addptr(rsp, additional_frame_bytes); + } } __ pop_FPU_state(); __ addptr(rsp, FPU_regs_live*wordSize); // Pop FPU registers __ popf();

src/cpu/x86/vm/sharedRuntime_x86_32.cpp
Index Unified diffs Context diffs Sdiffs Wdiffs Patch New Old Previous File Next File