// NOTE(review): This span is a garbled side-by-side-diff rendering, not
// compilable C++: the original file's line numbers (121, 122, ...) are fused
// into the text, original lines 164-264 are elided, and a '|' near the end
// separates the two diff columns. This first column appears to be HotSpot's
// RegisterSaver code (sharedRuntime_x86_64-style), AVX/256-bit variant:
//  - byte-offset accessors computed as BytesPerInt * <reg>_off;
//  - save_live_registers(): under COMPILER2 with save_vectors it asserts
//    UseAVX > 0 and MaxVectorSize == 32, reserves 16*16/wordSize extra words
//    for the YMM upper halves, and rounds the frame size to 16 bytes;
//  - restore_live_registers(): pops the arg-reg save area, then reloads the
//    upper 128 bits of xmm0-xmm15 via vinsertf128h from [rsp+0 .. rsp+240]
//    and does rsp += 256.
// Comment typo "registes" (should be "registers") at original lines 143/284.
// Because the interior (orig. lines 164-264) and the function tail are not
// visible here, the text is reproduced verbatim below rather than
// reconstructed.
121 122 // Offsets into the register save area 123 // Used by deoptimization when it is managing result register 124 // values on its own 125 126 static int rax_offset_in_bytes(void) { return BytesPerInt * rax_off; } 127 static int rdx_offset_in_bytes(void) { return BytesPerInt * rdx_off; } 128 static int rbx_offset_in_bytes(void) { return BytesPerInt * rbx_off; } 129 static int xmm0_offset_in_bytes(void) { return BytesPerInt * xmm0_off; } 130 static int return_offset_in_bytes(void) { return BytesPerInt * return_off; } 131 132 // During deoptimization only the result registers need to be restored, 133 // all the other values have already been extracted. 134 static void restore_result_registers(MacroAssembler* masm); 135 }; 136 137 OopMap* RegisterSaver::save_live_registers(MacroAssembler* masm, int additional_frame_words, int* total_frame_words, bool save_vectors) { 138 int vect_words = 0; 139 #ifdef COMPILER2 140 if (save_vectors) { 141 assert(UseAVX > 0, "256bit vectors are supported only with AVX"); 142 assert(MaxVectorSize == 32, "only 256bit vectors are supported now"); 143 // Save upper half of YMM registes 144 vect_words = 16 * 16 / wordSize; 145 additional_frame_words += vect_words; 146 } 147 #else 148 assert(!save_vectors, "vectors are generated only by C2"); 149 #endif 150 151 // Always make the frame size 16-byte aligned 152 int frame_size_in_bytes = round_to(additional_frame_words*wordSize + 153 reg_save_size*BytesPerInt, 16); 154 // OopMap frame size is in compiler stack slots (jint's) not bytes or words 155 int frame_size_in_slots = frame_size_in_bytes / BytesPerInt; 156 // The caller will allocate additional_frame_words 157 int additional_frame_slots = additional_frame_words*wordSize / BytesPerInt; 158 // CodeBlob frame size is in words. 159 int frame_size_in_words = frame_size_in_bytes / wordSize; 160 *total_frame_words = frame_size_in_words; 161 162 // Save registers, fpu state, and flags. 
163 // We assume caller has already pushed the return address onto the 265 map->set_callee_saved(STACK_OFFSET(xmm9H_off ), xmm9->as_VMReg()->next()); 266 map->set_callee_saved(STACK_OFFSET(xmm10H_off), xmm10->as_VMReg()->next()); 267 map->set_callee_saved(STACK_OFFSET(xmm11H_off), xmm11->as_VMReg()->next()); 268 map->set_callee_saved(STACK_OFFSET(xmm12H_off), xmm12->as_VMReg()->next()); 269 map->set_callee_saved(STACK_OFFSET(xmm13H_off), xmm13->as_VMReg()->next()); 270 map->set_callee_saved(STACK_OFFSET(xmm14H_off), xmm14->as_VMReg()->next()); 271 map->set_callee_saved(STACK_OFFSET(xmm15H_off), xmm15->as_VMReg()->next()); 272 } 273 274 return map; 275 } 276 277 void RegisterSaver::restore_live_registers(MacroAssembler* masm, bool restore_vectors) { 278 if (frame::arg_reg_save_area_bytes != 0) { 279 // Pop arg register save area 280 __ addptr(rsp, frame::arg_reg_save_area_bytes); 281 } 282 #ifdef COMPILER2 283 if (restore_vectors) { 284 // Restore upper half of YMM registes. 285 assert(UseAVX > 0, "256bit vectors are supported only with AVX"); 286 assert(MaxVectorSize == 32, "only 256bit vectors are supported now"); 287 __ vinsertf128h(xmm0, Address(rsp, 0)); 288 __ vinsertf128h(xmm1, Address(rsp, 16)); 289 __ vinsertf128h(xmm2, Address(rsp, 32)); 290 __ vinsertf128h(xmm3, Address(rsp, 48)); 291 __ vinsertf128h(xmm4, Address(rsp, 64)); 292 __ vinsertf128h(xmm5, Address(rsp, 80)); 293 __ vinsertf128h(xmm6, Address(rsp, 96)); 294 __ vinsertf128h(xmm7, Address(rsp,112)); 295 __ vinsertf128h(xmm8, Address(rsp,128)); 296 __ vinsertf128h(xmm9, Address(rsp,144)); 297 __ vinsertf128h(xmm10, Address(rsp,160)); 298 __ vinsertf128h(xmm11, Address(rsp,176)); 299 __ vinsertf128h(xmm12, Address(rsp,192)); 300 __ vinsertf128h(xmm13, Address(rsp,208)); 301 __ vinsertf128h(xmm14, Address(rsp,224)); 302 __ vinsertf128h(xmm15, Address(rsp,240)); 303 __ addptr(rsp, 256); 304 } 305 #else 306 assert(!restore_vectors, "vectors are generated only by C2"); | 121 122 // Offsets into the 
// NOTE(review): This span is the second column of a garbled side-by-side-diff
// rendering (original line numbers fused into the text; original lines
// ~165-265 elided; trailing '|' is a diff-column separator). It shows a
// 512-bit/EVEX variant of the RegisterSaver code. Concrete issues visible in
// the text itself:
//  - save_live_registers(): assert(UseAVX > 0, "512bit vectors are supported
//    only with EVEX") -- the condition only requires AVX level > 0 while the
//    message claims 512-bit/EVEX support; presumably this should be
//    UseAVX > 2 -- TODO confirm against the EVEX feature gating used
//    elsewhere in the VM.
//  - With MaxVectorSize == 64 asserted, only 16*16 bytes (the 16 YMM upper
//    halves) are reserved (vect_words = 16 * 16 / wordSize), and
//    restore_live_registers() likewise restores only 128 bits per register
//    via vinsertf128h from [rsp+0 .. rsp+240]; the "TODO: add ZMM save code"
//    comment confirms the upper 256 bits of the ZMM registers are not
//    preserved by this path.
//  - restore_live_registers()'s assert message says "supported only with AVX"
//    while the save side says "only with EVEX" -- the two messages are
//    inconsistent with each other.
//  - Comment typo "registes" (should be "registers") at original line 285.
// Because the interior and the function tail are elided from this view, the
// text is reproduced verbatim below rather than reconstructed.
register save area 123 // Used by deoptimization when it is managing result register 124 // values on its own 125 126 static int rax_offset_in_bytes(void) { return BytesPerInt * rax_off; } 127 static int rdx_offset_in_bytes(void) { return BytesPerInt * rdx_off; } 128 static int rbx_offset_in_bytes(void) { return BytesPerInt * rbx_off; } 129 static int xmm0_offset_in_bytes(void) { return BytesPerInt * xmm0_off; } 130 static int return_offset_in_bytes(void) { return BytesPerInt * return_off; } 131 132 // During deoptimization only the result registers need to be restored, 133 // all the other values have already been extracted. 134 static void restore_result_registers(MacroAssembler* masm); 135 }; 136 137 OopMap* RegisterSaver::save_live_registers(MacroAssembler* masm, int additional_frame_words, int* total_frame_words, bool save_vectors) { 138 int vect_words = 0; 139 #ifdef COMPILER2 140 if (save_vectors) { 141 assert(UseAVX > 0, "512bit vectors are supported only with EVEX"); 142 assert(MaxVectorSize == 64, "only 512bit vectors are supported now"); 143 // Save upper half of YMM registers 144 // TODO: add ZMM save code 145 vect_words = 16 * 16 / wordSize; 146 additional_frame_words += vect_words; 147 } 148 #else 149 assert(!save_vectors, "vectors are generated only by C2"); 150 #endif 151 152 // Always make the frame size 16-byte aligned 153 int frame_size_in_bytes = round_to(additional_frame_words*wordSize + 154 reg_save_size*BytesPerInt, 16); 155 // OopMap frame size is in compiler stack slots (jint's) not bytes or words 156 int frame_size_in_slots = frame_size_in_bytes / BytesPerInt; 157 // The caller will allocate additional_frame_words 158 int additional_frame_slots = additional_frame_words*wordSize / BytesPerInt; 159 // CodeBlob frame size is in words. 160 int frame_size_in_words = frame_size_in_bytes / wordSize; 161 *total_frame_words = frame_size_in_words; 162 163 // Save registers, fpu state, and flags. 
164 // We assume caller has already pushed the return address onto the 266 map->set_callee_saved(STACK_OFFSET(xmm9H_off ), xmm9->as_VMReg()->next()); 267 map->set_callee_saved(STACK_OFFSET(xmm10H_off), xmm10->as_VMReg()->next()); 268 map->set_callee_saved(STACK_OFFSET(xmm11H_off), xmm11->as_VMReg()->next()); 269 map->set_callee_saved(STACK_OFFSET(xmm12H_off), xmm12->as_VMReg()->next()); 270 map->set_callee_saved(STACK_OFFSET(xmm13H_off), xmm13->as_VMReg()->next()); 271 map->set_callee_saved(STACK_OFFSET(xmm14H_off), xmm14->as_VMReg()->next()); 272 map->set_callee_saved(STACK_OFFSET(xmm15H_off), xmm15->as_VMReg()->next()); 273 } 274 275 return map; 276 } 277 278 void RegisterSaver::restore_live_registers(MacroAssembler* masm, bool restore_vectors) { 279 if (frame::arg_reg_save_area_bytes != 0) { 280 // Pop arg register save area 281 __ addptr(rsp, frame::arg_reg_save_area_bytes); 282 } 283 #ifdef COMPILER2 284 if (restore_vectors) { 285 // Restore upper half of YMM registes. 286 assert(UseAVX > 0, "512bit vectors are supported only with AVX"); 287 assert(MaxVectorSize == 64, "only 512bit vectors are supported now"); 288 __ vinsertf128h(xmm0, Address(rsp, 0)); 289 __ vinsertf128h(xmm1, Address(rsp, 16)); 290 __ vinsertf128h(xmm2, Address(rsp, 32)); 291 __ vinsertf128h(xmm3, Address(rsp, 48)); 292 __ vinsertf128h(xmm4, Address(rsp, 64)); 293 __ vinsertf128h(xmm5, Address(rsp, 80)); 294 __ vinsertf128h(xmm6, Address(rsp, 96)); 295 __ vinsertf128h(xmm7, Address(rsp,112)); 296 __ vinsertf128h(xmm8, Address(rsp,128)); 297 __ vinsertf128h(xmm9, Address(rsp,144)); 298 __ vinsertf128h(xmm10, Address(rsp,160)); 299 __ vinsertf128h(xmm11, Address(rsp,176)); 300 __ vinsertf128h(xmm12, Address(rsp,192)); 301 __ vinsertf128h(xmm13, Address(rsp,208)); 302 __ vinsertf128h(xmm14, Address(rsp,224)); 303 __ vinsertf128h(xmm15, Address(rsp,240)); 304 __ addptr(rsp, 256); 305 } 306 #else 307 assert(!restore_vectors, "vectors are generated only by C2"); |