--- old/src/cpu/x86/vm/c1_Runtime1_x86.cpp
+++ new/src/cpu/x86/vm/c1_Runtime1_x86.cpp
1 1 /*
2 2 * Copyright 1999-2010 Sun Microsystems, Inc. All Rights Reserved.
3 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 4 *
5 5 * This code is free software; you can redistribute it and/or modify it
6 6 * under the terms of the GNU General Public License version 2 only, as
7 7 * published by the Free Software Foundation.
8 8 *
9 9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 12 * version 2 for more details (a copy is included in the LICENSE file that
13 13 * accompanied this code).
14 14 *
15 15 * You should have received a copy of the GNU General Public License version
16 16 * 2 along with this work; if not, write to the Free Software Foundation,
17 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 18 *
19 19 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
20 20 * CA 95054 USA or visit www.sun.com if you need additional information or
21 21 * have any questions.
22 22 *
23 23 */
24 24
25 25 #include "incls/_precompiled.incl"
26 26 #include "incls/_c1_Runtime1_x86.cpp.incl"
27 27
28 28
29 29 // Implementation of StubAssembler
30 30
31 31 int StubAssembler::call_RT(Register oop_result1, Register oop_result2, address entry, int args_size) {
32 32 // setup registers
33 33 const Register thread = NOT_LP64(rdi) LP64_ONLY(r15_thread); // is callee-saved register (Visual C++ calling conventions)
34 34 assert(!(oop_result1->is_valid() || oop_result2->is_valid()) || oop_result1 != oop_result2, "registers must be different");
35 35 assert(oop_result1 != thread && oop_result2 != thread, "registers must be different");
36 36 assert(args_size >= 0, "illegal args_size");
37 37
38 38 #ifdef _LP64
39 39 mov(c_rarg0, thread);
40 40 set_num_rt_args(0); // Nothing on stack
41 41 #else
42 42 set_num_rt_args(1 + args_size);
43 43
44 44 // push java thread (becomes first argument of C function)
45 45 get_thread(thread);
46 46 push(thread);
47 47 #endif // _LP64
48 48
49 49 set_last_Java_frame(thread, noreg, rbp, NULL);
50 50
51 51 // do the call
52 52 call(RuntimeAddress(entry));
53 53 int call_offset = offset();
54 54 // verify callee-saved register
55 55 #ifdef ASSERT
56 56 guarantee(thread != rax, "change this code");
57 57 push(rax);
58 58 { Label L;
59 59 get_thread(rax);
60 60 cmpptr(thread, rax);
61 61 jcc(Assembler::equal, L);
62 62 int3();
63 63 stop("StubAssembler::call_RT: rdi not callee saved?");
64 64 bind(L);
65 65 }
66 66 pop(rax);
67 67 #endif
68 68 reset_last_Java_frame(thread, true, false);
69 69
70 70 // discard thread and arguments
71 71 NOT_LP64(addptr(rsp, num_rt_args()*BytesPerWord));
72 72
73 73 // check for pending exceptions
74 74 { Label L;
75 75 cmpptr(Address(thread, Thread::pending_exception_offset()), (int32_t)NULL_WORD);
76 76 jcc(Assembler::equal, L);
77 77 // exception pending => remove activation and forward to exception handler
78 78 movptr(rax, Address(thread, Thread::pending_exception_offset()));
79 79 // make sure that the vm_results are cleared
80 80 if (oop_result1->is_valid()) {
81 81 movptr(Address(thread, JavaThread::vm_result_offset()), NULL_WORD);
82 82 }
83 83 if (oop_result2->is_valid()) {
84 84 movptr(Address(thread, JavaThread::vm_result_2_offset()), NULL_WORD);
85 85 }
86 86 if (frame_size() == no_frame_size) {
87 87 leave();
88 88 jump(RuntimeAddress(StubRoutines::forward_exception_entry()));
89 89 } else if (_stub_id == Runtime1::forward_exception_id) {
90 90 should_not_reach_here();
91 91 } else {
92 92 jump(RuntimeAddress(Runtime1::entry_for(Runtime1::forward_exception_id)));
93 93 }
94 94 bind(L);
95 95 }
96 96 // get oop results if there are any and reset the values in the thread
97 97 if (oop_result1->is_valid()) {
98 98 movptr(oop_result1, Address(thread, JavaThread::vm_result_offset()));
99 99 movptr(Address(thread, JavaThread::vm_result_offset()), NULL_WORD);
100 100 verify_oop(oop_result1);
101 101 }
102 102 if (oop_result2->is_valid()) {
103 103 movptr(oop_result2, Address(thread, JavaThread::vm_result_2_offset()));
104 104 movptr(Address(thread, JavaThread::vm_result_2_offset()), NULL_WORD);
105 105 verify_oop(oop_result2);
106 106 }
107 107 return call_offset;
108 108 }
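
For illustration, a C-level sketch of the epilogue protocol the assembly above implements. The types and names here (Thread, vm_result fields, call_rt_epilogue) are hypothetical models, not the VM's real classes:

#include <cstddef>

struct Thread {
  void* pending_exception;
  void* vm_result;
  void* vm_result_2;
};

// Returns the pending exception (to be forwarded) or nullptr on success.
void* call_rt_epilogue(Thread* t, void** oop_result1) {
  if (t->pending_exception != nullptr) {
    t->vm_result   = nullptr;        // clear results so no stale oops leak
    t->vm_result_2 = nullptr;
    return t->pending_exception;     // the asm jumps to forward_exception here
  }
  if (oop_result1 != nullptr) {
    *oop_result1 = t->vm_result;     // hand the oop result to the caller
    t->vm_result = nullptr;          // reset the thread-local slot
  }
  return nullptr;
}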
109 109
110 110
111 111 int StubAssembler::call_RT(Register oop_result1, Register oop_result2, address entry, Register arg1) {
112 112 #ifdef _LP64
113 113 mov(c_rarg1, arg1);
114 114 #else
115 115 push(arg1);
116 116 #endif // _LP64
117 117 return call_RT(oop_result1, oop_result2, entry, 1);
118 118 }
119 119
120 120
121 121 int StubAssembler::call_RT(Register oop_result1, Register oop_result2, address entry, Register arg1, Register arg2) {
122 122 #ifdef _LP64
123 123 if (c_rarg1 == arg2) {
124 124 if (c_rarg2 == arg1) {
125 125 xchgq(arg1, arg2);
126 126 } else {
127 127 mov(c_rarg2, arg2);
128 128 mov(c_rarg1, arg1);
129 129 }
130 130 } else {
131 131 mov(c_rarg1, arg1);
132 132 mov(c_rarg2, arg2);
133 133 }
134 134 #else
135 135 push(arg2);
136 136 push(arg1);
137 137 #endif // _LP64
138 138 return call_RT(oop_result1, oop_result2, entry, 2);
139 139 }
140 140
141 141
142 142 int StubAssembler::call_RT(Register oop_result1, Register oop_result2, address entry, Register arg1, Register arg2, Register arg3) {
143 143 #ifdef _LP64
144 144 // if there is any conflict use the stack
145 145 if (arg1 == c_rarg2 || arg1 == c_rarg3 ||
146 146 arg2 == c_rarg1 || arg2 == c_rarg3 ||
147 147 arg3 == c_rarg1 || arg3 == c_rarg2) {
148 148 push(arg3);
149 149 push(arg2);
150 150 push(arg1);
151 151 pop(c_rarg1);
152 152 pop(c_rarg2);
153 153 pop(c_rarg3);
154 154 } else {
155 155 mov(c_rarg1, arg1);
156 156 mov(c_rarg2, arg2);
157 157 mov(c_rarg3, arg3);
158 158 }
159 159 #else
160 160 push(arg3);
161 161 push(arg2);
162 162 push(arg1);
163 163 #endif // _LP64
164 164 return call_RT(oop_result1, oop_result2, entry, 3);
165 165 }
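
For illustration, a minimal C++ sketch (hypothetical values; registers modeled as plain ints) of the clobber hazard the push/pop fallback above guards against: copying in the fixed order c_rarg1, c_rarg2, c_rarg3 loses a value whenever a later source register was an earlier destination.

#include <cassert>

int main() {
  // Model registers as ints; suppose the incoming arg3 lives in c_rarg2.
  int c_rarg1 = 0, c_rarg2 = 42 /* arg3's value */, c_rarg3 = 0;
  int arg1 = 1, arg2 = 2;
  c_rarg1 = arg1;       // mov(c_rarg1, arg1)
  c_rarg2 = arg2;       // mov(c_rarg2, arg2) overwrites arg3's value
  c_rarg3 = c_rarg2;    // mov(c_rarg3, arg3) reads 2, not the original 42
  assert(c_rarg3 == 2); // the 42 is gone; hence the stack fallback
  return 0;
}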
166 166
167 167
168 168 // Implementation of StubFrame
169 169
170 170 class StubFrame: public StackObj {
171 171 private:
172 172 StubAssembler* _sasm;
173 173
174 174 public:
175 175 StubFrame(StubAssembler* sasm, const char* name, bool must_gc_arguments);
176 176 void load_argument(int offset_in_words, Register reg);
177 177
178 178 ~StubFrame();
179 179 };
180 180
181 181
182 182 #define __ _sasm->
183 183
184 184 StubFrame::StubFrame(StubAssembler* sasm, const char* name, bool must_gc_arguments) {
185 185 _sasm = sasm;
186 186 __ set_info(name, must_gc_arguments);
187 187 __ enter();
188 188 }
189 189
190 190 // load parameters that were stored with LIR_Assembler::store_parameter
191 191 // Note: offsets for store_parameter and load_argument must match
192 192 void StubFrame::load_argument(int offset_in_words, Register reg) {
193 193 // rbp, + 0: link
194 194 // + 1: return address
195 195 // + 2: argument with offset 0
196 196 // + 3: argument with offset 1
197 197 // + 4: ...
198 198
199 199 __ movptr(reg, Address(rbp, (offset_in_words + 2) * BytesPerWord));
200 200 }
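
For illustration, a sketch of that offset arithmetic under the assumption of a 32-bit VM (BytesPerWord == 4): the two extra words are the saved rbp pushed by enter() and the return address pushed by the call.

#include <cstdio>

int main() {
  const int BytesPerWord = 4;  // assumption: 32-bit VM
  for (int k = 0; k < 3; k++) {
    // rbp + 0: link (saved rbp), + 1 word: return address, + 2..: arguments
    printf("argument %d is at rbp + %d bytes\n", k, (k + 2) * BytesPerWord);
  }
  return 0;
}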
201 201
202 202
203 203 StubFrame::~StubFrame() {
204 204 __ leave();
205 205 __ ret(0);
206 206 }
207 207
208 208 #undef __
209 209
210 210
211 211 // Implementation of Runtime1
212 212
213 213 #define __ sasm->
214 214
215 215 const int float_regs_as_doubles_size_in_slots = pd_nof_fpu_regs_frame_map * 2;
216 216 const int xmm_regs_as_doubles_size_in_slots = FrameMap::nof_xmm_regs * 2;
217 217
218 218 // Stack layout for saving/restoring all the registers needed during a runtime
219 219 // call (this includes deoptimization)
220 220 // Note that users of this frame may well have arguments to some runtime
221 221 // while these values are on the stack. These positions neglect those arguments
222 222 // but the code in save_live_registers will take the argument count into
223 223 // account.
224 224 //
225 225 #ifdef _LP64
226 226 #define SLOT2(x) x,
227 227 #define SLOT_PER_WORD 2
228 228 #else
229 229 #define SLOT2(x)
230 230 #define SLOT_PER_WORD 1
231 231 #endif // _LP64
232 232
233 233 enum reg_save_layout {
234 234 // 64bit needs to keep stack 16 byte aligned. So we add some alignment dummies to make that
235 235 // happen and will assert if the stack size we create is misaligned
236 236 #ifdef _LP64
237 237 align_dummy_0, align_dummy_1,
238 238 #endif // _LP64
239 239 dummy1, SLOT2(dummy1H) // 0, 4
240 240 dummy2, SLOT2(dummy2H) // 8, 12
241 241 // Two temps to be used as needed by users of save/restore callee registers
242 242 temp_2_off, SLOT2(temp_2H_off) // 16, 20
243 243 temp_1_off, SLOT2(temp_1H_off) // 24, 28
244 244 xmm_regs_as_doubles_off, // 32
245 245 float_regs_as_doubles_off = xmm_regs_as_doubles_off + xmm_regs_as_doubles_size_in_slots, // 160
246 246 fpu_state_off = float_regs_as_doubles_off + float_regs_as_doubles_size_in_slots, // 224
247 247 // fpu_state_end_off is exclusive
248 248 fpu_state_end_off = fpu_state_off + (FPUStateSizeInWords / SLOT_PER_WORD), // 352
249 249 marker = fpu_state_end_off, SLOT2(markerH) // 352, 356
250 250 extra_space_offset, // 360
251 251 #ifdef _LP64
252 252 r15_off = extra_space_offset, r15H_off, // 360, 364
253 253 r14_off, r14H_off, // 368, 372
254 254 r13_off, r13H_off, // 376, 380
255 255 r12_off, r12H_off, // 384, 388
256 256 r11_off, r11H_off, // 392, 396
257 257 r10_off, r10H_off, // 400, 404
258 258 r9_off, r9H_off, // 408, 412
259 259 r8_off, r8H_off, // 416, 420
260 260 rdi_off, rdiH_off, // 424, 428
261 261 #else
262 262 rdi_off = extra_space_offset,
263 263 #endif // _LP64
264 264 rsi_off, SLOT2(rsiH_off) // 432, 436
265 265 rbp_off, SLOT2(rbpH_off) // 440, 444
266 266 rsp_off, SLOT2(rspH_off) // 448, 452
267 267 rbx_off, SLOT2(rbxH_off) // 456, 460
268 268 rdx_off, SLOT2(rdxH_off) // 464, 468
269 269 rcx_off, SLOT2(rcxH_off) // 472, 476
270 270 rax_off, SLOT2(raxH_off) // 480, 484
271 271 saved_rbp_off, SLOT2(saved_rbpH_off) // 488, 492
272 272 return_off, SLOT2(returnH_off) // 496, 500
273 273 reg_save_frame_size, // As noted: neglects any parameters to runtime // 504
274 274
275 275 #ifdef _WIN64
276 276 c_rarg0_off = rcx_off,
277 277 #else
278 278 c_rarg0_off = rdi_off,
279 279 #endif // WIN64
280 280
281 281 // equates
282 282
283 283 // illegal instruction handler
284 284 continue_dest_off = temp_1_off,
285 285
286 286 // deoptimization equates
287 287 fp0_off = float_regs_as_doubles_off, // slot for java float/double return value
288 288 xmm0_off = xmm_regs_as_doubles_off, // slot for java float/double return value
289 289 deopt_type = temp_2_off, // slot for type of deopt in progress
290 290 ret_type = temp_1_off // slot for return type
291 291 };
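
For illustration, a sketch of the alignment invariant this layout is built around. The slot count below is hypothetical (generate_oop_map asserts the real one), and VMRegImpl::stack_slot_size == 4 is assumed:

#include <cassert>

int main() {
  const int stack_slot_size = 4;    // assumed VMRegImpl::stack_slot_size
  int reg_save_frame_size = 128;    // hypothetical LP64 slot count
  // 64-bit code must keep rsp 16-byte aligned; the align_dummy slots at
  // the top of the enum exist to make this multiple come out right.
  assert((reg_save_frame_size * stack_slot_size) % 16 == 0);
  return 0;
}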
292 292
293 293
294 294
295 295 // Save off registers which might be killed by calls into the runtime.
296 296 // Tries to be smart about FP registers. In particular we separate
297 297 // saving and describing the FPU registers for deoptimization since we
298 298 // have to save the FPU registers twice if we describe them and on P4
299 299 // saving FPU registers which don't contain anything appears
300 300 // expensive. The deopt blob is the only thing which needs to
301 301 // describe FPU registers. In all other cases it should be sufficient
302 302 // to simply save their current value.
303 303
304 304 static OopMap* generate_oop_map(StubAssembler* sasm, int num_rt_args,
305 305 bool save_fpu_registers = true) {
306 306
307 307 // In 64bit all the args are in regs so there are no additional stack slots
308 308 LP64_ONLY(num_rt_args = 0);
309 309 LP64_ONLY(assert((reg_save_frame_size * VMRegImpl::stack_slot_size) % 16 == 0, "must be 16 byte aligned");)
310 310 int frame_size_in_slots = reg_save_frame_size + num_rt_args; // args + thread
311 311 sasm->set_frame_size(frame_size_in_slots / VMRegImpl::slots_per_word );
312 312
313 313 // record saved value locations in an OopMap
314 314 // locations are offsets from sp after runtime call; num_rt_args is number of arguments in call, including thread
315 315 OopMap* map = new OopMap(frame_size_in_slots, 0);
316 316 map->set_callee_saved(VMRegImpl::stack2reg(rax_off + num_rt_args), rax->as_VMReg());
317 317 map->set_callee_saved(VMRegImpl::stack2reg(rcx_off + num_rt_args), rcx->as_VMReg());
318 318 map->set_callee_saved(VMRegImpl::stack2reg(rdx_off + num_rt_args), rdx->as_VMReg());
319 319 map->set_callee_saved(VMRegImpl::stack2reg(rbx_off + num_rt_args), rbx->as_VMReg());
320 320 map->set_callee_saved(VMRegImpl::stack2reg(rsi_off + num_rt_args), rsi->as_VMReg());
321 321 map->set_callee_saved(VMRegImpl::stack2reg(rdi_off + num_rt_args), rdi->as_VMReg());
322 322 #ifdef _LP64
323 323 map->set_callee_saved(VMRegImpl::stack2reg(r8_off + num_rt_args), r8->as_VMReg());
324 324 map->set_callee_saved(VMRegImpl::stack2reg(r9_off + num_rt_args), r9->as_VMReg());
325 325 map->set_callee_saved(VMRegImpl::stack2reg(r10_off + num_rt_args), r10->as_VMReg());
326 326 map->set_callee_saved(VMRegImpl::stack2reg(r11_off + num_rt_args), r11->as_VMReg());
327 327 map->set_callee_saved(VMRegImpl::stack2reg(r12_off + num_rt_args), r12->as_VMReg());
328 328 map->set_callee_saved(VMRegImpl::stack2reg(r13_off + num_rt_args), r13->as_VMReg());
329 329 map->set_callee_saved(VMRegImpl::stack2reg(r14_off + num_rt_args), r14->as_VMReg());
330 330 map->set_callee_saved(VMRegImpl::stack2reg(r15_off + num_rt_args), r15->as_VMReg());
331 331
332 332 // This is stupid but needed.
333 333 map->set_callee_saved(VMRegImpl::stack2reg(raxH_off + num_rt_args), rax->as_VMReg()->next());
334 334 map->set_callee_saved(VMRegImpl::stack2reg(rcxH_off + num_rt_args), rcx->as_VMReg()->next());
335 335 map->set_callee_saved(VMRegImpl::stack2reg(rdxH_off + num_rt_args), rdx->as_VMReg()->next());
336 336 map->set_callee_saved(VMRegImpl::stack2reg(rbxH_off + num_rt_args), rbx->as_VMReg()->next());
337 337 map->set_callee_saved(VMRegImpl::stack2reg(rsiH_off + num_rt_args), rsi->as_VMReg()->next());
338 338 map->set_callee_saved(VMRegImpl::stack2reg(rdiH_off + num_rt_args), rdi->as_VMReg()->next());
339 339
340 340 map->set_callee_saved(VMRegImpl::stack2reg(r8H_off + num_rt_args), r8->as_VMReg()->next());
341 341 map->set_callee_saved(VMRegImpl::stack2reg(r9H_off + num_rt_args), r9->as_VMReg()->next());
342 342 map->set_callee_saved(VMRegImpl::stack2reg(r10H_off + num_rt_args), r10->as_VMReg()->next());
343 343 map->set_callee_saved(VMRegImpl::stack2reg(r11H_off + num_rt_args), r11->as_VMReg()->next());
344 344 map->set_callee_saved(VMRegImpl::stack2reg(r12H_off + num_rt_args), r12->as_VMReg()->next());
345 345 map->set_callee_saved(VMRegImpl::stack2reg(r13H_off + num_rt_args), r13->as_VMReg()->next());
346 346 map->set_callee_saved(VMRegImpl::stack2reg(r14H_off + num_rt_args), r14->as_VMReg()->next());
347 347 map->set_callee_saved(VMRegImpl::stack2reg(r15H_off + num_rt_args), r15->as_VMReg()->next());
348 348 #endif // _LP64
349 349
350 350 if (save_fpu_registers) {
351 351 if (UseSSE < 2) {
352 352 int fpu_off = float_regs_as_doubles_off;
353 353 for (int n = 0; n < FrameMap::nof_fpu_regs; n++) {
354 354 VMReg fpu_name_0 = FrameMap::fpu_regname(n);
355 355 map->set_callee_saved(VMRegImpl::stack2reg(fpu_off + num_rt_args), fpu_name_0);
356 356 // %%% This is really a waste but we'll keep things as they were for now
357 357 if (true) {
358 358 map->set_callee_saved(VMRegImpl::stack2reg(fpu_off + 1 + num_rt_args), fpu_name_0->next());
359 359 }
360 360 fpu_off += 2;
361 361 }
362 362 assert(fpu_off == fpu_state_off, "incorrect number of fpu stack slots");
363 363 }
364 364
365 365 if (UseSSE >= 2) {
366 366 int xmm_off = xmm_regs_as_doubles_off;
367 367 for (int n = 0; n < FrameMap::nof_xmm_regs; n++) {
368 368 VMReg xmm_name_0 = as_XMMRegister(n)->as_VMReg();
369 369 map->set_callee_saved(VMRegImpl::stack2reg(xmm_off + num_rt_args), xmm_name_0);
370 370 // %%% This is really a waste but we'll keep things as they were for now
371 371 if (true) {
372 372 map->set_callee_saved(VMRegImpl::stack2reg(xmm_off + 1 + num_rt_args), xmm_name_0->next());
373 373 }
374 374 xmm_off += 2;
375 375 }
376 376 assert(xmm_off == float_regs_as_doubles_off, "incorrect number of xmm registers");
377 377
378 378 } else if (UseSSE == 1) {
379 379 int xmm_off = xmm_regs_as_doubles_off;
380 380 for (int n = 0; n < FrameMap::nof_xmm_regs; n++) {
381 381 VMReg xmm_name_0 = as_XMMRegister(n)->as_VMReg();
382 382 map->set_callee_saved(VMRegImpl::stack2reg(xmm_off + num_rt_args), xmm_name_0);
383 383 xmm_off += 2;
384 384 }
385 385 assert(xmm_off == float_regs_as_doubles_off, "incorrect number of xmm registers");
386 386 }
387 387 }
388 388
389 389 return map;
390 390 }
391 391
392 392 static OopMap* save_live_registers(StubAssembler* sasm, int num_rt_args,
393 393 bool save_fpu_registers = true) {
394 394 __ block_comment("save_live_registers");
395 395
396 396 // 64bit passes the args in regs to the c++ runtime
397 397 int frame_size_in_slots = reg_save_frame_size NOT_LP64(+ num_rt_args); // args + thread
398 398 // frame_size = round_to(frame_size, 4);
399 399 sasm->set_frame_size(frame_size_in_slots / VMRegImpl::slots_per_word );
400 400
401 401 __ pusha(); // integer registers
402 402
403 403 // assert(float_regs_as_doubles_off % 2 == 0, "misaligned offset");
404 404 // assert(xmm_regs_as_doubles_off % 2 == 0, "misaligned offset");
405 405
406 406 __ subptr(rsp, extra_space_offset * VMRegImpl::stack_slot_size);
407 407
408 408 #ifdef ASSERT
409 409 __ movptr(Address(rsp, marker * VMRegImpl::stack_slot_size), (int32_t)0xfeedbeef);
410 410 #endif
411 411
412 412 if (save_fpu_registers) {
413 413 if (UseSSE < 2) {
414 414 // save FPU stack
415 415 __ fnsave(Address(rsp, fpu_state_off * VMRegImpl::stack_slot_size));
416 416 __ fwait();
417 417
418 418 #ifdef ASSERT
419 419 Label ok;
420 420 __ cmpw(Address(rsp, fpu_state_off * VMRegImpl::stack_slot_size), StubRoutines::fpu_cntrl_wrd_std());
421 421 __ jccb(Assembler::equal, ok);
422 422 __ stop("corrupted control word detected");
423 423 __ bind(ok);
424 424 #endif
425 425
426 426 // Reset the control word to guard against exceptions being unmasked
427 427 // since fstp_d can cause FPU stack underflow exceptions. Write it
428 428 // into the on stack copy and then reload that to make sure that the
429 429 // current and future values are correct.
430 430 __ movw(Address(rsp, fpu_state_off * VMRegImpl::stack_slot_size), StubRoutines::fpu_cntrl_wrd_std());
431 431 __ frstor(Address(rsp, fpu_state_off * VMRegImpl::stack_slot_size));
432 432
433 433 // Save the FPU registers in de-opt-able form
434 434 __ fstp_d(Address(rsp, float_regs_as_doubles_off * VMRegImpl::stack_slot_size + 0));
435 435 __ fstp_d(Address(rsp, float_regs_as_doubles_off * VMRegImpl::stack_slot_size + 8));
436 436 __ fstp_d(Address(rsp, float_regs_as_doubles_off * VMRegImpl::stack_slot_size + 16));
437 437 __ fstp_d(Address(rsp, float_regs_as_doubles_off * VMRegImpl::stack_slot_size + 24));
438 438 __ fstp_d(Address(rsp, float_regs_as_doubles_off * VMRegImpl::stack_slot_size + 32));
439 439 __ fstp_d(Address(rsp, float_regs_as_doubles_off * VMRegImpl::stack_slot_size + 40));
440 440 __ fstp_d(Address(rsp, float_regs_as_doubles_off * VMRegImpl::stack_slot_size + 48));
441 441 __ fstp_d(Address(rsp, float_regs_as_doubles_off * VMRegImpl::stack_slot_size + 56));
442 442 }
443 443
444 444 if (UseSSE >= 2) {
445 445 // save XMM registers
446 446 // XMM registers can contain float or double values, but this is not known here,
447 447 // so always save them as doubles.
448 448 // note that float values are _not_ converted automatically, so for float values
449 449 // the second word contains only garbage data.
450 450 __ movdbl(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 0), xmm0);
451 451 __ movdbl(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 8), xmm1);
452 452 __ movdbl(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 16), xmm2);
453 453 __ movdbl(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 24), xmm3);
454 454 __ movdbl(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 32), xmm4);
455 455 __ movdbl(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 40), xmm5);
456 456 __ movdbl(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 48), xmm6);
457 457 __ movdbl(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 56), xmm7);
458 458 #ifdef _LP64
459 459 __ movdbl(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 64), xmm8);
460 460 __ movdbl(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 72), xmm9);
461 461 __ movdbl(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 80), xmm10);
462 462 __ movdbl(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 88), xmm11);
463 463 __ movdbl(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 96), xmm12);
464 464 __ movdbl(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 104), xmm13);
465 465 __ movdbl(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 112), xmm14);
466 466 __ movdbl(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 120), xmm15);
467 467 #endif // _LP64
468 468 } else if (UseSSE == 1) {
469 469 // save XMM registers as float because double not supported without SSE2
470 470 __ movflt(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 0), xmm0);
471 471 __ movflt(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 8), xmm1);
472 472 __ movflt(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 16), xmm2);
473 473 __ movflt(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 24), xmm3);
474 474 __ movflt(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 32), xmm4);
475 475 __ movflt(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 40), xmm5);
476 476 __ movflt(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 48), xmm6);
477 477 __ movflt(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 56), xmm7);
478 478 }
479 479 }
480 480
481 481 // FPU stack must be empty now
482 482 __ verify_FPU(0, "save_live_registers");
483 483
484 484 return generate_oop_map(sasm, num_rt_args, save_fpu_registers);
485 485 }
486 486
487 487
488 488 static void restore_fpu(StubAssembler* sasm, bool restore_fpu_registers = true) {
489 489 if (restore_fpu_registers) {
490 490 if (UseSSE >= 2) {
491 491 // restore XMM registers
492 492 __ movdbl(xmm0, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 0));
493 493 __ movdbl(xmm1, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 8));
494 494 __ movdbl(xmm2, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 16));
495 495 __ movdbl(xmm3, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 24));
496 496 __ movdbl(xmm4, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 32));
497 497 __ movdbl(xmm5, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 40));
498 498 __ movdbl(xmm6, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 48));
499 499 __ movdbl(xmm7, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 56));
500 500 #ifdef _LP64
501 501 __ movdbl(xmm8, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 64));
502 502 __ movdbl(xmm9, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 72));
503 503 __ movdbl(xmm10, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 80));
504 504 __ movdbl(xmm11, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 88));
505 505 __ movdbl(xmm12, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 96));
506 506 __ movdbl(xmm13, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 104));
507 507 __ movdbl(xmm14, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 112));
508 508 __ movdbl(xmm15, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 120));
509 509 #endif // _LP64
510 510 } else if (UseSSE == 1) {
511 511 // restore XMM registers
512 512 __ movflt(xmm0, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 0));
513 513 __ movflt(xmm1, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 8));
514 514 __ movflt(xmm2, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 16));
515 515 __ movflt(xmm3, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 24));
516 516 __ movflt(xmm4, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 32));
517 517 __ movflt(xmm5, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 40));
518 518 __ movflt(xmm6, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 48));
519 519 __ movflt(xmm7, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 56));
520 520 }
521 521
522 522 if (UseSSE < 2) {
523 523 __ frstor(Address(rsp, fpu_state_off * VMRegImpl::stack_slot_size));
524 524 } else {
525 525 // check that FPU stack is really empty
526 526 __ verify_FPU(0, "restore_live_registers");
527 527 }
528 528
529 529 } else {
530 530 // check that FPU stack is really empty
531 531 __ verify_FPU(0, "restore_live_registers");
532 532 }
533 533
534 534 #ifdef ASSERT
535 535 {
536 536 Label ok;
537 537 __ cmpptr(Address(rsp, marker * VMRegImpl::stack_slot_size), (int32_t)0xfeedbeef);
538 538 __ jcc(Assembler::equal, ok);
539 539 __ stop("bad offsets in frame");
540 540 __ bind(ok);
541 541 }
542 542 #endif // ASSERT
543 543
544 544 __ addptr(rsp, extra_space_offset * VMRegImpl::stack_slot_size);
545 545 }
546 546
547 547
548 548 static void restore_live_registers(StubAssembler* sasm, bool restore_fpu_registers = true) {
549 549 __ block_comment("restore_live_registers");
550 550
551 551 restore_fpu(sasm, restore_fpu_registers);
552 552 __ popa();
553 553 }
554 554
555 555
556 556 static void restore_live_registers_except_rax(StubAssembler* sasm, bool restore_fpu_registers = true) {
557 557 __ block_comment("restore_live_registers_except_rax");
558 558
559 559 restore_fpu(sasm, restore_fpu_registers);
560 560
561 561 #ifdef _LP64
562 562 __ movptr(r15, Address(rsp, 0));
563 563 __ movptr(r14, Address(rsp, wordSize));
564 564 __ movptr(r13, Address(rsp, 2 * wordSize));
565 565 __ movptr(r12, Address(rsp, 3 * wordSize));
566 566 __ movptr(r11, Address(rsp, 4 * wordSize));
567 567 __ movptr(r10, Address(rsp, 5 * wordSize));
568 568 __ movptr(r9, Address(rsp, 6 * wordSize));
569 569 __ movptr(r8, Address(rsp, 7 * wordSize));
570 570 __ movptr(rdi, Address(rsp, 8 * wordSize));
571 571 __ movptr(rsi, Address(rsp, 9 * wordSize));
572 572 __ movptr(rbp, Address(rsp, 10 * wordSize));
573 573 // skip rsp
574 574 __ movptr(rbx, Address(rsp, 12 * wordSize));
575 575 __ movptr(rdx, Address(rsp, 13 * wordSize));
576 576 __ movptr(rcx, Address(rsp, 14 * wordSize));
577 577
578 578 __ addptr(rsp, 16 * wordSize);
579 579 #else
580 580
581 581 __ pop(rdi);
582 582 __ pop(rsi);
583 583 __ pop(rbp);
584 584 __ pop(rbx); // skip this value
585 585 __ pop(rbx);
586 586 __ pop(rdx);
587 587 __ pop(rcx);
588 588 __ addptr(rsp, BytesPerWord);
589 589 #endif // _LP64
590 590 }
591 591
592 592
593 593 void Runtime1::initialize_pd() {
594 594 // nothing to do
595 595 }
596 596
597 597
598 598 // target: the entry point of the method that creates and posts the exception oop
599 599 // has_argument: true if the exception needs an argument (passed on stack because registers must be preserved)
600 600
601 601 OopMapSet* Runtime1::generate_exception_throw(StubAssembler* sasm, address target, bool has_argument) {
602 602 // preserve all registers
603 603 int num_rt_args = has_argument ? 2 : 1;
604 604 OopMap* oop_map = save_live_registers(sasm, num_rt_args);
605 605
606 606 // now all registers are saved and can be used freely
607 607 // verify that no old value is used accidentally
608 608 __ invalidate_registers(true, true, true, true, true, true);
609 609
610 610 // registers used by this stub
611 611 const Register temp_reg = rbx;
612 612
613 613 // load argument for exception that is passed as an argument into the stub
614 614 if (has_argument) {
615 615 #ifdef _LP64
616 616 __ movptr(c_rarg1, Address(rbp, 2*BytesPerWord));
617 617 #else
618 618 __ movptr(temp_reg, Address(rbp, 2*BytesPerWord));
619 619 __ push(temp_reg);
620 620 #endif // _LP64
621 621 }
622 622 int call_offset = __ call_RT(noreg, noreg, target, num_rt_args - 1);
623 623
624 624 OopMapSet* oop_maps = new OopMapSet();
625 625 oop_maps->add_gc_map(call_offset, oop_map);
626 626
627 627 __ stop("should not reach here");
628 628
629 629 return oop_maps;
630 630 }
631 631
632 632
633 633 void Runtime1::generate_handle_exception(StubAssembler *sasm, OopMapSet* oop_maps, OopMap* oop_map, bool save_fpu_registers) {
634 634 // incoming parameters
635 635 const Register exception_oop = rax;
636 636 const Register exception_pc = rdx;
637 637 // other registers used in this stub
638 638 const Register real_return_addr = rbx;
639 639 const Register thread = NOT_LP64(rdi) LP64_ONLY(r15_thread);
640 640
641 641 __ block_comment("generate_handle_exception");
642 642
643 643 #ifdef TIERED
644 644 // C2 can leave the fpu stack dirty
645 645 if (UseSSE < 2 ) {
646 646 __ empty_FPU_stack();
647 647 }
648 648 #endif // TIERED
649 649
650 650 // verify that only rax and rdx are valid at this time
651 651 __ invalidate_registers(false, true, true, false, true, true);
652 652 // verify that rax contains a valid exception
653 653 __ verify_not_null_oop(exception_oop);
654 654
655 655 // load address of JavaThread object for thread-local data
656 656 NOT_LP64(__ get_thread(thread);)
657 657
658 658 #ifdef ASSERT
659 659 // check that fields in JavaThread for exception oop and issuing pc are
660 660 // empty before writing to them
661 661 Label oop_empty;
662 662 __ cmpptr(Address(thread, JavaThread::exception_oop_offset()), (int32_t) NULL_WORD);
663 663 __ jcc(Assembler::equal, oop_empty);
664 664 __ stop("exception oop already set");
665 665 __ bind(oop_empty);
666 666
667 667 Label pc_empty;
668 668 __ cmpptr(Address(thread, JavaThread::exception_pc_offset()), 0);
669 669 __ jcc(Assembler::equal, pc_empty);
670 670 __ stop("exception pc already set");
671 671 __ bind(pc_empty);
672 672 #endif
673 673
674 674 // save exception oop and issuing pc into JavaThread
675 675 // (exception handler will load it from here)
676 676 __ movptr(Address(thread, JavaThread::exception_oop_offset()), exception_oop);
677 677 __ movptr(Address(thread, JavaThread::exception_pc_offset()), exception_pc);
678 678
679 679 // save real return address (pc that called this stub)
680 680 __ movptr(real_return_addr, Address(rbp, 1*BytesPerWord));
681 681 __ movptr(Address(rsp, temp_1_off * VMRegImpl::stack_slot_size), real_return_addr);
682 682
683 683 // patch throwing pc into return address (has bci & oop map)
684 684 __ movptr(Address(rbp, 1*BytesPerWord), exception_pc);
685 685
686 686 // compute the exception handler.
687 687 // the exception oop and the throwing pc are read from the fields in JavaThread
688 688 int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, exception_handler_for_pc));
689 689 oop_maps->add_gc_map(call_offset, oop_map);
690 690
691 691 // rax: handler address
692 692 // will be the deopt blob if the nmethod was deoptimized while we looked up
693 693 // the handler, regardless of whether a handler existed in the nmethod.
694 694
695 695 // only rax is valid at this time; all other registers have been destroyed by the runtime call
696 696 __ invalidate_registers(false, true, true, true, true, true);
697 697
698 698 #ifdef ASSERT
699 699 // Do we have an exception handler in the nmethod?
700 700 Label done;
701 701 __ testptr(rax, rax);
702 702 __ jcc(Assembler::notZero, done);
703 703 __ stop("no handler found");
704 704 __ bind(done);
705 705 #endif
706 706
707 707 // exception handler found
708 708 // patch the return address -> the stub will directly return to the exception handler
709 709 __ movptr(Address(rbp, 1*BytesPerWord), rax);
710 710
711 711 // restore registers
712 712 restore_live_registers(sasm, save_fpu_registers);
713 713
714 714 // return to exception handler
715 715 __ leave();
716 716 __ ret(0);
717 717
718 718 }
719 719
720 720
721 721 void Runtime1::generate_unwind_exception(StubAssembler *sasm) {
722 722 // incoming parameters
723 723 const Register exception_oop = rax;
724 724 // callee-saved copy of exception_oop during runtime call
725 725 const Register exception_oop_callee_saved = NOT_LP64(rsi) LP64_ONLY(r14);
726 726 // other registers used in this stub
727 727 const Register exception_pc = rdx;
728 728 const Register handler_addr = rbx;
729 729 const Register thread = NOT_LP64(rdi) LP64_ONLY(r15_thread);
730 730
731 731 // verify that only rax is valid at this time
732 732 __ invalidate_registers(false, true, true, true, true, true);
733 733
734 734 #ifdef ASSERT
735 735 // check that fields in JavaThread for exception oop and issuing pc are empty
736 736 NOT_LP64(__ get_thread(thread);)
737 737 Label oop_empty;
738 738 __ cmpptr(Address(thread, JavaThread::exception_oop_offset()), 0);
739 739 __ jcc(Assembler::equal, oop_empty);
740 740 __ stop("exception oop must be empty");
741 741 __ bind(oop_empty);
742 742
743 743 Label pc_empty;
744 744 __ cmpptr(Address(thread, JavaThread::exception_pc_offset()), 0);
745 745 __ jcc(Assembler::equal, pc_empty);
746 746 __ stop("exception pc must be empty");
747 747 __ bind(pc_empty);
748 748 #endif
749 749
750 750 // clear the FPU stack in case any FPU results are left behind
751 751 __ empty_FPU_stack();
752 752
753 753 // save exception_oop in callee-saved register to preserve it during runtime calls
754 754 __ verify_not_null_oop(exception_oop);
755 755 __ movptr(exception_oop_callee_saved, exception_oop);
756 756
757 757 NOT_LP64(__ get_thread(thread);)
758 758 // Get return address (is on top of stack after leave).
759 759 __ movptr(exception_pc, Address(rsp, 0));
760 760
761 761 // search the exception handler address of the caller (using the return address)
762 762 __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::exception_handler_for_return_address), thread, exception_pc);
763 763 // rax: exception handler address of the caller
764 764
765 765 // Only RAX and RSI are valid at this time, all other registers have been destroyed by the call.
766 766 __ invalidate_registers(false, true, true, true, false, true);
767 767
768 768 // move result of call into correct register
769 769 __ movptr(handler_addr, rax);
770 770
771 771 // Restore exception oop to RAX (required convention of exception handler).
772 772 __ movptr(exception_oop, exception_oop_callee_saved);
773 773
774 774 // verify that there is really a valid exception in rax
775 775 __ verify_not_null_oop(exception_oop);
776 776
777 777 // get throwing pc (= return address).
778 778 // rdx has been destroyed by the call, so it must be set again
779 779 // the pop is also necessary to simulate the effect of a ret(0)
780 780 __ pop(exception_pc);
781 781
782 782 // Restore SP from BP if the exception PC is a MethodHandle call site.
783 783 NOT_LP64(__ get_thread(thread);)
784 784 __ cmpl(Address(thread, JavaThread::is_method_handle_return_offset()), 0);
785 - __ cmovptr(Assembler::notEqual, rsp, rbp);
785 + __ cmovptr(Assembler::notEqual, rsp, rbp_mh_SP_save);
786 786
787 787 // continue at exception handler (return address removed)
788 788 // note: do *not* remove arguments when unwinding the
789 789 // activation since the caller assumes having
790 790 // all arguments on the stack when entering the
791 791 // runtime to determine the exception handler
792 792 // (GC happens at call site with arguments!)
793 793 // rax: exception oop
794 794 // rdx: throwing pc
795 795 // rbx: exception handler
796 796 __ jmp(handler_addr);
797 797 }
798 798
799 799
800 800 OopMapSet* Runtime1::generate_patching(StubAssembler* sasm, address target) {
801 801 // use the maximum number of runtime-arguments here because it is difficult to
802 802 // distinguish each RT-Call.
803 803 // Note: This number affects also the RT-Call in generate_handle_exception because
804 804 // the oop-map is shared for all calls.
805 805 const int num_rt_args = 2; // thread + dummy
806 806
807 807 DeoptimizationBlob* deopt_blob = SharedRuntime::deopt_blob();
808 808 assert(deopt_blob != NULL, "deoptimization blob must have been created");
809 809
810 810 OopMap* oop_map = save_live_registers(sasm, num_rt_args);
811 811
812 812 #ifdef _LP64
813 813 const Register thread = r15_thread;
814 814 // No need to worry about dummy
815 815 __ mov(c_rarg0, thread);
816 816 #else
817 817 __ push(rax); // push dummy
818 818
819 819 const Register thread = rdi; // is callee-saved register (Visual C++ calling conventions)
820 820 // push java thread (becomes first argument of C function)
821 821 __ get_thread(thread);
822 822 __ push(thread);
823 823 #endif // _LP64
824 824 __ set_last_Java_frame(thread, noreg, rbp, NULL);
825 825 // do the call
826 826 __ call(RuntimeAddress(target));
827 827 OopMapSet* oop_maps = new OopMapSet();
828 828 oop_maps->add_gc_map(__ offset(), oop_map);
829 829 // verify callee-saved register
830 830 #ifdef ASSERT
831 831 guarantee(thread != rax, "change this code");
832 832 __ push(rax);
833 833 { Label L;
834 834 __ get_thread(rax);
835 835 __ cmpptr(thread, rax);
836 836 __ jcc(Assembler::equal, L);
837 837 __ stop("StubAssembler::call_RT: rdi/r15 not callee saved?");
838 838 __ bind(L);
839 839 }
840 840 __ pop(rax);
841 841 #endif
842 842 __ reset_last_Java_frame(thread, true, false);
843 843 #ifndef _LP64
844 844 __ pop(rcx); // discard thread arg
845 845 __ pop(rcx); // discard dummy
846 846 #endif // _LP64
847 847
848 848 // check for pending exceptions
849 849 { Label L;
850 850 __ cmpptr(Address(thread, Thread::pending_exception_offset()), (int32_t)NULL_WORD);
851 851 __ jcc(Assembler::equal, L);
852 852 // exception pending => remove activation and forward to exception handler
853 853
854 854 __ testptr(rax, rax); // have we deoptimized?
855 855 __ jump_cc(Assembler::equal,
856 856 RuntimeAddress(Runtime1::entry_for(Runtime1::forward_exception_id)));
857 857
858 858 // the deopt blob expects exceptions in the special fields of
859 859 // JavaThread, so copy and clear pending exception.
860 860
861 861 // load and clear pending exception
862 862 __ movptr(rax, Address(thread, Thread::pending_exception_offset()));
863 863 __ movptr(Address(thread, Thread::pending_exception_offset()), NULL_WORD);
864 864
865 865 // check that there is really a valid exception
866 866 __ verify_not_null_oop(rax);
867 867
868 868 // load throwing pc: this is the return address of the stub
869 869 __ movptr(rdx, Address(rsp, return_off * VMRegImpl::stack_slot_size));
870 870
871 871 #ifdef ASSERT
872 872 // check that fields in JavaThread for exception oop and issuing pc are empty
873 873 Label oop_empty;
874 874 __ cmpptr(Address(thread, JavaThread::exception_oop_offset()), (int32_t)NULL_WORD);
875 875 __ jcc(Assembler::equal, oop_empty);
876 876 __ stop("exception oop must be empty");
877 877 __ bind(oop_empty);
878 878
879 879 Label pc_empty;
880 880 __ cmpptr(Address(thread, JavaThread::exception_pc_offset()), (int32_t)NULL_WORD);
881 881 __ jcc(Assembler::equal, pc_empty);
882 882 __ stop("exception pc must be empty");
883 883 __ bind(pc_empty);
884 884 #endif
885 885
886 886 // store exception oop and throwing pc to JavaThread
887 887 __ movptr(Address(thread, JavaThread::exception_oop_offset()), rax);
888 888 __ movptr(Address(thread, JavaThread::exception_pc_offset()), rdx);
889 889
890 890 restore_live_registers(sasm);
891 891
892 892 __ leave();
893 893 __ addptr(rsp, BytesPerWord); // remove return address from stack
894 894
895 895 // Forward the exception directly to deopt blob. We can blow no
896 896 // registers and must leave throwing pc on the stack. A patch may
897 897 // have values live in registers, so we use the entry point with the
898 898 // exception in tls.
899 899 __ jump(RuntimeAddress(deopt_blob->unpack_with_exception_in_tls()));
900 900
901 901 __ bind(L);
902 902 }
903 903
904 904
905 905 // Runtime will return true if the nmethod has been deoptimized during
906 906 // the patching process. In that case we must do a deopt reexecute instead.
907 907
908 908 Label reexecuteEntry, cont;
909 909
910 910 __ testptr(rax, rax); // have we deoptimized?
911 911 __ jcc(Assembler::equal, cont); // no
912 912
913 913 // Will reexecute. The proper return address is already on the stack; we just restore
914 914 // registers, pop all of our frame but the return address, and jump to the deopt blob
915 915 restore_live_registers(sasm);
916 916 __ leave();
917 917 __ jump(RuntimeAddress(deopt_blob->unpack_with_reexecution()));
918 918
919 919 __ bind(cont);
920 920 restore_live_registers(sasm);
921 921 __ leave();
922 922 __ ret(0);
923 923
924 924 return oop_maps;
925 925
926 926 }
927 927
928 928
929 929 OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
930 930
931 931 // for better readability
932 932 const bool must_gc_arguments = true;
933 933 const bool dont_gc_arguments = false;
934 934
935 935 // default value; overwritten for some optimized stubs that are called from methods that do not use the fpu
936 936 bool save_fpu_registers = true;
937 937
938 938 // stub code & info for the different stubs
939 939 OopMapSet* oop_maps = NULL;
940 940 switch (id) {
941 941 case forward_exception_id:
942 942 {
943 943 // we're handling an exception in the context of a compiled
944 944 // frame. The registers have been saved in the standard
945 945 // places. Perform an exception lookup in the caller and
946 946 // dispatch to the handler if found. Otherwise unwind and
947 947 // dispatch to the caller's exception handler.
948 948
949 949 const Register thread = NOT_LP64(rdi) LP64_ONLY(r15_thread);
950 950 const Register exception_oop = rax;
951 951 const Register exception_pc = rdx;
952 952
953 953 // load pending exception oop into rax
954 954 __ movptr(exception_oop, Address(thread, Thread::pending_exception_offset()));
955 955 // clear pending exception
956 956 __ movptr(Address(thread, Thread::pending_exception_offset()), NULL_WORD);
957 957
958 958 // load issuing PC (the return address for this stub) into rdx
959 959 __ movptr(exception_pc, Address(rbp, 1*BytesPerWord));
960 960
961 961 // make sure that the vm_results are cleared (may be unnecessary)
962 962 __ movptr(Address(thread, JavaThread::vm_result_offset()), NULL_WORD);
963 963 __ movptr(Address(thread, JavaThread::vm_result_2_offset()), NULL_WORD);
964 964
965 965 // verify that there is really a valid exception in rax
966 966 __ verify_not_null_oop(exception_oop);
967 967
968 968
969 969 oop_maps = new OopMapSet();
970 970 OopMap* oop_map = generate_oop_map(sasm, 1);
971 971 generate_handle_exception(sasm, oop_maps, oop_map);
972 972 __ stop("should not reach here");
973 973 }
974 974 break;
975 975
976 976 case new_instance_id:
977 977 case fast_new_instance_id:
978 978 case fast_new_instance_init_check_id:
979 979 {
980 980 Register klass = rdx; // Incoming
981 981 Register obj = rax; // Result
982 982
983 983 if (id == new_instance_id) {
984 984 __ set_info("new_instance", dont_gc_arguments);
985 985 } else if (id == fast_new_instance_id) {
986 986 __ set_info("fast new_instance", dont_gc_arguments);
987 987 } else {
988 988 assert(id == fast_new_instance_init_check_id, "bad StubID");
989 989 __ set_info("fast new_instance init check", dont_gc_arguments);
990 990 }
991 991
992 992 if ((id == fast_new_instance_id || id == fast_new_instance_init_check_id) &&
993 993 UseTLAB && FastTLABRefill) {
994 994 Label slow_path;
995 995 Register obj_size = rcx;
996 996 Register t1 = rbx;
997 997 Register t2 = rsi;
998 998 assert_different_registers(klass, obj, obj_size, t1, t2);
999 999
1000 1000 __ push(rdi);
1001 1001 __ push(rbx);
1002 1002
1003 1003 if (id == fast_new_instance_init_check_id) {
1004 1004 // make sure the klass is initialized
1005 1005 __ cmpl(Address(klass, instanceKlass::init_state_offset_in_bytes() + sizeof(oopDesc)), instanceKlass::fully_initialized);
1006 1006 __ jcc(Assembler::notEqual, slow_path);
1007 1007 }
1008 1008
1009 1009 #ifdef ASSERT
1010 1010 // assert object can be fast path allocated
1011 1011 {
1012 1012 Label ok, not_ok;
1013 1013 __ movl(obj_size, Address(klass, Klass::layout_helper_offset_in_bytes() + sizeof(oopDesc)));
1014 1014 __ cmpl(obj_size, 0); // make sure it's an instance (LH > 0)
1015 1015 __ jcc(Assembler::lessEqual, not_ok);
1016 1016 __ testl(obj_size, Klass::_lh_instance_slow_path_bit);
1017 1017 __ jcc(Assembler::zero, ok);
1018 1018 __ bind(not_ok);
1019 1019 __ stop("assert(can be fast path allocated)");
1020 1020 __ should_not_reach_here();
1021 1021 __ bind(ok);
1022 1022 }
1023 1023 #endif // ASSERT
1024 1024
1025 1025 // if we got here then the TLAB allocation failed, so try
1026 1026 // refilling the TLAB or allocating directly from eden.
1027 1027 Label retry_tlab, try_eden;
1028 1028 __ tlab_refill(retry_tlab, try_eden, slow_path); // does not destroy rdx (klass)
1029 1029
1030 1030 __ bind(retry_tlab);
1031 1031
1032 1032 // get the instance size (size is positive so movl is fine for 64bit)
1033 1033 __ movl(obj_size, Address(klass, klassOopDesc::header_size() * HeapWordSize + Klass::layout_helper_offset_in_bytes()));
1034 1034 __ tlab_allocate(obj, obj_size, 0, t1, t2, slow_path);
1035 1035 __ initialize_object(obj, klass, obj_size, 0, t1, t2);
1036 1036 __ verify_oop(obj);
1037 1037 __ pop(rbx);
1038 1038 __ pop(rdi);
1039 1039 __ ret(0);
1040 1040
1041 1041 __ bind(try_eden);
1042 1042 // get the instance size (size is positive so movl is fine for 64bit)
1043 1043 __ movl(obj_size, Address(klass, klassOopDesc::header_size() * HeapWordSize + Klass::layout_helper_offset_in_bytes()));
1044 1044 __ eden_allocate(obj, obj_size, 0, t1, slow_path);
1045 1045 __ initialize_object(obj, klass, obj_size, 0, t1, t2);
1046 1046 __ verify_oop(obj);
1047 1047 __ pop(rbx);
1048 1048 __ pop(rdi);
1049 1049 __ ret(0);
1050 1050
1051 1051 __ bind(slow_path);
1052 1052 __ pop(rbx);
1053 1053 __ pop(rdi);
1054 1054 }
1055 1055
1056 1056 __ enter();
1057 1057 OopMap* map = save_live_registers(sasm, 2);
1058 1058 int call_offset = __ call_RT(obj, noreg, CAST_FROM_FN_PTR(address, new_instance), klass);
1059 1059 oop_maps = new OopMapSet();
1060 1060 oop_maps->add_gc_map(call_offset, map);
1061 1061 restore_live_registers_except_rax(sasm);
1062 1062 __ verify_oop(obj);
1063 1063 __ leave();
1064 1064 __ ret(0);
1065 1065
1066 1066 // rax: new instance
1067 1067 }
1068 1068
1069 1069 break;
1070 1070
1071 1071 #ifdef TIERED
1072 1072 case counter_overflow_id:
1073 1073 {
1074 1074 Register bci = rax;
1075 1075 __ enter();
1076 1076 OopMap* map = save_live_registers(sasm, 2);
1077 1077 // Retrieve bci
1078 1078 __ movl(bci, Address(rbp, 2*BytesPerWord));
1079 1079 int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, counter_overflow), bci);
1080 1080 oop_maps = new OopMapSet();
1081 1081 oop_maps->add_gc_map(call_offset, map);
1082 1082 restore_live_registers(sasm);
1083 1083 __ leave();
1084 1084 __ ret(0);
1085 1085 }
1086 1086 break;
1087 1087 #endif // TIERED
1088 1088
1089 1089 case new_type_array_id:
1090 1090 case new_object_array_id:
1091 1091 {
1092 1092 Register length = rbx; // Incoming
1093 1093 Register klass = rdx; // Incoming
1094 1094 Register obj = rax; // Result
1095 1095
1096 1096 if (id == new_type_array_id) {
1097 1097 __ set_info("new_type_array", dont_gc_arguments);
1098 1098 } else {
1099 1099 __ set_info("new_object_array", dont_gc_arguments);
1100 1100 }
1101 1101
1102 1102 #ifdef ASSERT
1103 1103 // assert object type is really an array of the proper kind
1104 1104 {
1105 1105 Label ok;
1106 1106 Register t0 = obj;
1107 1107 __ movl(t0, Address(klass, Klass::layout_helper_offset_in_bytes() + sizeof(oopDesc)));
1108 1108 __ sarl(t0, Klass::_lh_array_tag_shift);
1109 1109 int tag = ((id == new_type_array_id)
1110 1110 ? Klass::_lh_array_tag_type_value
1111 1111 : Klass::_lh_array_tag_obj_value);
1112 1112 __ cmpl(t0, tag);
1113 1113 __ jcc(Assembler::equal, ok);
1114 1114 __ stop("assert(is an array klass)");
1115 1115 __ should_not_reach_here();
1116 1116 __ bind(ok);
1117 1117 }
1118 1118 #endif // ASSERT
1119 1119
1120 1120 if (UseTLAB && FastTLABRefill) {
1121 1121 Register arr_size = rsi;
1122 1122 Register t1 = rcx; // must be rcx for use as shift count
1123 1123 Register t2 = rdi;
1124 1124 Label slow_path;
1125 1125 assert_different_registers(length, klass, obj, arr_size, t1, t2);
1126 1126
1127 1127 // check that array length is small enough for fast path.
1128 1128 __ cmpl(length, C1_MacroAssembler::max_array_allocation_length);
1129 1129 __ jcc(Assembler::above, slow_path);
1130 1130
1131 1131 // if we got here then the TLAB allocation failed, so try
1132 1132 // refilling the TLAB or allocating directly from eden.
1133 1133 Label retry_tlab, try_eden;
1134 1134 __ tlab_refill(retry_tlab, try_eden, slow_path); // preserves rbx & rdx
1135 1135
1136 1136 __ bind(retry_tlab);
1137 1137
1138 1138 // get the allocation size: round_up(hdr + length << (layout_helper & 0x1F))
1139 1139 // since size is positive movl does right thing on 64bit
1140 1140 __ movl(t1, Address(klass, klassOopDesc::header_size() * HeapWordSize + Klass::layout_helper_offset_in_bytes()));
1141 1141 // since size is positive movl does right thing on 64bit
1142 1142 __ movl(arr_size, length);
1143 1143 assert(t1 == rcx, "fixed register usage");
1144 1144 __ shlptr(arr_size /* by t1=rcx, mod 32 */);
1145 1145 __ shrptr(t1, Klass::_lh_header_size_shift);
1146 1146 __ andptr(t1, Klass::_lh_header_size_mask);
1147 1147 __ addptr(arr_size, t1);
1148 1148 __ addptr(arr_size, MinObjAlignmentInBytesMask); // align up
1149 1149 __ andptr(arr_size, ~MinObjAlignmentInBytesMask);
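// Illustrative worked example, not in the original source (values assume a
// 32-bit VM): for a 10-element int[], layout_helper yields hdr_size = 12 and
// log2(element_size) = 2, so arr_size = 12 + (10 << 2) = 52 bytes, which the
// two align instructions above round up to 56 (MinObjAlignmentInBytes == 8).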
1150 1150
1151 1151 __ tlab_allocate(obj, arr_size, 0, t1, t2, slow_path); // preserves arr_size
1152 1152
1153 1153 __ initialize_header(obj, klass, length, t1, t2);
1154 1154 __ movb(t1, Address(klass, klassOopDesc::header_size() * HeapWordSize + Klass::layout_helper_offset_in_bytes() + (Klass::_lh_header_size_shift / BitsPerByte)));
1155 1155 assert(Klass::_lh_header_size_shift % BitsPerByte == 0, "bytewise");
1156 1156 assert(Klass::_lh_header_size_mask <= 0xFF, "bytewise");
1157 1157 __ andptr(t1, Klass::_lh_header_size_mask);
1158 1158 __ subptr(arr_size, t1); // body length
1159 1159 __ addptr(t1, obj); // body start
1160 1160 __ initialize_body(t1, arr_size, 0, t2);
1161 1161 __ verify_oop(obj);
1162 1162 __ ret(0);
1163 1163
1164 1164 __ bind(try_eden);
1165 1165 // get the allocation size: round_up(hdr + length << (layout_helper & 0x1F))
1166 1166 // since size is positive movl does right thing on 64bit
1167 1167 __ movl(t1, Address(klass, klassOopDesc::header_size() * HeapWordSize + Klass::layout_helper_offset_in_bytes()));
1168 1168 // since size is positive movl does right thing on 64bit
1169 1169 __ movl(arr_size, length);
1170 1170 assert(t1 == rcx, "fixed register usage");
1171 1171 __ shlptr(arr_size /* by t1=rcx, mod 32 */);
1172 1172 __ shrptr(t1, Klass::_lh_header_size_shift);
1173 1173 __ andptr(t1, Klass::_lh_header_size_mask);
1174 1174 __ addptr(arr_size, t1);
1175 1175 __ addptr(arr_size, MinObjAlignmentInBytesMask); // align up
1176 1176 __ andptr(arr_size, ~MinObjAlignmentInBytesMask);
1177 1177
1178 1178 __ eden_allocate(obj, arr_size, 0, t1, slow_path); // preserves arr_size
1179 1179
1180 1180 __ initialize_header(obj, klass, length, t1, t2);
1181 1181 __ movb(t1, Address(klass, klassOopDesc::header_size() * HeapWordSize + Klass::layout_helper_offset_in_bytes() + (Klass::_lh_header_size_shift / BitsPerByte)));
1182 1182 assert(Klass::_lh_header_size_shift % BitsPerByte == 0, "bytewise");
1183 1183 assert(Klass::_lh_header_size_mask <= 0xFF, "bytewise");
1184 1184 __ andptr(t1, Klass::_lh_header_size_mask);
1185 1185 __ subptr(arr_size, t1); // body length
1186 1186 __ addptr(t1, obj); // body start
1187 1187 __ initialize_body(t1, arr_size, 0, t2);
1188 1188 __ verify_oop(obj);
1189 1189 __ ret(0);
1190 1190
1191 1191 __ bind(slow_path);
1192 1192 }
1193 1193
1194 1194 __ enter();
1195 1195 OopMap* map = save_live_registers(sasm, 3);
1196 1196 int call_offset;
1197 1197 if (id == new_type_array_id) {
1198 1198 call_offset = __ call_RT(obj, noreg, CAST_FROM_FN_PTR(address, new_type_array), klass, length);
1199 1199 } else {
1200 1200 call_offset = __ call_RT(obj, noreg, CAST_FROM_FN_PTR(address, new_object_array), klass, length);
1201 1201 }
1202 1202
1203 1203 oop_maps = new OopMapSet();
1204 1204 oop_maps->add_gc_map(call_offset, map);
1205 1205 restore_live_registers_except_rax(sasm);
1206 1206
1207 1207 __ verify_oop(obj);
1208 1208 __ leave();
1209 1209 __ ret(0);
1210 1210
1211 1211 // rax: new array
1212 1212 }
1213 1213 break;
1214 1214
1215 1215 case new_multi_array_id:
1216 1216 { StubFrame f(sasm, "new_multi_array", dont_gc_arguments);
1217 1217 // rax: klass
1218 1218 // rbx: rank
1219 1219 // rcx: address of 1st dimension
1220 1220 OopMap* map = save_live_registers(sasm, 4);
1221 1221 int call_offset = __ call_RT(rax, noreg, CAST_FROM_FN_PTR(address, new_multi_array), rax, rbx, rcx);
1222 1222
1223 1223 oop_maps = new OopMapSet();
1224 1224 oop_maps->add_gc_map(call_offset, map);
1225 1225 restore_live_registers_except_rax(sasm);
1226 1226
1227 1227 // rax: new multi array
1228 1228 __ verify_oop(rax);
1229 1229 }
1230 1230 break;
1231 1231
1232 1232 case register_finalizer_id:
1233 1233 {
1234 1234 __ set_info("register_finalizer", dont_gc_arguments);
1235 1235
1236 1236 // This is called via call_runtime so the arguments
1237 1237 // will be placed in C ABI locations
1238 1238
1239 1239 #ifdef _LP64
1240 1240 __ verify_oop(c_rarg0);
1241 1241 __ mov(rax, c_rarg0);
1242 1242 #else
1243 1243 // The object is passed on the stack and we haven't pushed a
1244 1244 // frame yet, so it's one word away from the top of stack.
1245 1245 __ movptr(rax, Address(rsp, 1 * BytesPerWord));
1246 1246 __ verify_oop(rax);
1247 1247 #endif // _LP64
1248 1248
1249 1249 // load the klass and check the has finalizer flag
1250 1250 Label register_finalizer;
1251 1251 Register t = rsi;
1252 1252 __ movptr(t, Address(rax, oopDesc::klass_offset_in_bytes()));
1253 1253 __ movl(t, Address(t, Klass::access_flags_offset_in_bytes() + sizeof(oopDesc)));
1254 1254 __ testl(t, JVM_ACC_HAS_FINALIZER);
1255 1255 __ jcc(Assembler::notZero, register_finalizer);
1256 1256 __ ret(0);
1257 1257
1258 1258 __ bind(register_finalizer);
1259 1259 __ enter();
1260 1260 OopMap* oop_map = save_live_registers(sasm, 2 /*num_rt_args */);
1261 1261 int call_offset = __ call_RT(noreg, noreg,
1262 1262 CAST_FROM_FN_PTR(address, SharedRuntime::register_finalizer), rax);
1263 1263 oop_maps = new OopMapSet();
1264 1264 oop_maps->add_gc_map(call_offset, oop_map);
1265 1265
1266 1266 // Now restore all the live registers
1267 1267 restore_live_registers(sasm);
1268 1268
1269 1269 __ leave();
1270 1270 __ ret(0);
1271 1271 }
1272 1272 break;
1273 1273
1274 1274 case throw_range_check_failed_id:
1275 1275 { StubFrame f(sasm, "range_check_failed", dont_gc_arguments);
1276 1276 oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_range_check_exception), true);
1277 1277 }
1278 1278 break;
1279 1279
1280 1280 case throw_index_exception_id:
1281 1281 { StubFrame f(sasm, "index_range_check_failed", dont_gc_arguments);
1282 1282 oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_index_exception), true);
1283 1283 }
1284 1284 break;
1285 1285
1286 1286 case throw_div0_exception_id:
1287 1287 { StubFrame f(sasm, "throw_div0_exception", dont_gc_arguments);
1288 1288 oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_div0_exception), false);
1289 1289 }
1290 1290 break;
1291 1291
1292 1292 case throw_null_pointer_exception_id:
1293 1293 { StubFrame f(sasm, "throw_null_pointer_exception", dont_gc_arguments);
1294 1294 oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_null_pointer_exception), false);
1295 1295 }
1296 1296 break;
1297 1297
1298 1298 case handle_exception_nofpu_id:
1299 1299 save_fpu_registers = false;
1300 1300 // fall through
1301 1301 case handle_exception_id:
1302 1302 { StubFrame f(sasm, "handle_exception", dont_gc_arguments);
1303 1303 oop_maps = new OopMapSet();
1304 1304 OopMap* oop_map = save_live_registers(sasm, 1, save_fpu_registers);
1305 1305 generate_handle_exception(sasm, oop_maps, oop_map, save_fpu_registers);
1306 1306 }
1307 1307 break;
1308 1308
1309 1309 case unwind_exception_id:
1310 1310 { __ set_info("unwind_exception", dont_gc_arguments);
1311 1311 // note: no stubframe since we are about to leave the current
1312 1312 // activation and we are calling a leaf VM function only.
1313 1313 generate_unwind_exception(sasm);
1314 1314 }
1315 1315 break;
1316 1316
1317 1317 case throw_array_store_exception_id:
1318 1318 { StubFrame f(sasm, "throw_array_store_exception", dont_gc_arguments);
1319 1319 // tos + 0: link
1320 1320 // + 1: return address
1321 1321 oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_array_store_exception), false);
1322 1322 }
1323 1323 break;
1324 1324
1325 1325 case throw_class_cast_exception_id:
1326 1326 { StubFrame f(sasm, "throw_class_cast_exception", dont_gc_arguments);
1327 1327 oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_class_cast_exception), true);
1328 1328 }
1329 1329 break;
1330 1330
1331 1331 case throw_incompatible_class_change_error_id:
1332 1332 { StubFrame f(sasm, "throw_incompatible_class_cast_exception", dont_gc_arguments);
1333 1333 oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_incompatible_class_change_error), false);
1334 1334 }
1335 1335 break;
1336 1336
1337 1337 case slow_subtype_check_id:
1338 1338 {
1339 1339 // Typical calling sequence:
1340 1340 // __ push(klass_RInfo); // object klass or other subclass
1341 1341 // __ push(sup_k_RInfo); // array element klass or other superclass
1342 1342 // __ call(slow_subtype_check);
1343 1343 // Note that the subclass is pushed first, and is therefore deepest.
1344 1344 // Previous versions of this code reversed the names 'sub' and 'super'.
1345 1345 // This was operationally harmless but made the code unreadable.
1346 1346 enum layout {
1347 1347 rax_off, SLOT2(raxH_off)
1348 1348 rcx_off, SLOT2(rcxH_off)
1349 1349 rsi_off, SLOT2(rsiH_off)
1350 1350 rdi_off, SLOT2(rdiH_off)
1351 1351 // saved_rbp_off, SLOT2(saved_rbpH_off)
1352 1352 return_off, SLOT2(returnH_off)
1353 1353 sup_k_off, SLOT2(sup_kH_off)
1354 1354 klass_off, SLOT2(superH_off)
1355 1355 framesize,
1356 1356 result_off = klass_off // deepest argument is also the return value
1357 1357 };
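// On 32-bit, where SLOT2 expands to nothing and a stack slot is one
// word, the frame after the four pushes below is (offsets in words):
//   rsp + 0 : saved rax
//   rsp + 1 : saved rcx
//   rsp + 2 : saved rsi
//   rsp + 3 : saved rdi
//   rsp + 4 : return address
//   rsp + 5 : superclass (sup_k_off)
//   rsp + 6 : subclass   (klass_off, overwritten with the result)
// On 64-bit each entry occupies two 32-bit slots.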
1358 1358
1359 1359 __ set_info("slow_subtype_check", dont_gc_arguments);
1360 1360 __ push(rdi);
1361 1361 __ push(rsi);
1362 1362 __ push(rcx);
1363 1363 __ push(rax);
1364 1364
1365 1365 // This is called by pushing args, not with the C ABI
1366 1366 __ movptr(rsi, Address(rsp, (klass_off) * VMRegImpl::stack_slot_size)); // subclass
1367 1367 __ movptr(rax, Address(rsp, (sup_k_off) * VMRegImpl::stack_slot_size)); // superclass
1368 1368
1369 1369 Label miss;
1370 1370 __ check_klass_subtype_slow_path(rsi, rax, rcx, rdi, NULL, &miss);
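// passing NULL as the success label makes success fall through;
// failure branches to 'miss'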
1371 1371
1372 1372 // fallthrough on success:
1373 1373 __ movptr(Address(rsp, (result_off) * VMRegImpl::stack_slot_size), 1); // result
1374 1374 __ pop(rax);
1375 1375 __ pop(rcx);
1376 1376 __ pop(rsi);
1377 1377 __ pop(rdi);
1378 1378 __ ret(0);
1379 1379
1380 1380 __ bind(miss);
1381 1381 __ movptr(Address(rsp, (result_off) * VMRegImpl::stack_slot_size), NULL_WORD); // result
1382 1382 __ pop(rax);
1383 1383 __ pop(rcx);
1384 1384 __ pop(rsi);
1385 1385 __ pop(rdi);
1386 1386 __ ret(0);
1387 1387 }
1388 1388 break;
1389 1389
1390 1390 case monitorenter_nofpu_id:
1391 1391 save_fpu_registers = false;
1392 1392 // fall through
1393 1393 case monitorenter_id:
1394 1394 {
1395 1395 StubFrame f(sasm, "monitorenter", dont_gc_arguments);
1396 1396 OopMap* map = save_live_registers(sasm, 3, save_fpu_registers);
1397 1397
1398 1398 // Called with store_parameter, not the C ABI
1399 1399
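// A hypothetical caller sketch: the LIR assembler stores the arguments
// into the outgoing frame before calling the stub, e.g.
//   store_parameter(lock_reg, 0);  // lock address
//   store_parameter(obj_reg,  1);  // object
//   call(RuntimeAddress(Runtime1::entry_for(monitorenter_id)));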
1400 1400 f.load_argument(1, rax); // rax: object
1401 1401 f.load_argument(0, rbx); // rbx: lock address
1402 1402
1403 1403 int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, monitorenter), rax, rbx);
1404 1404
1405 1405 oop_maps = new OopMapSet();
1406 1406 oop_maps->add_gc_map(call_offset, map);
1407 1407 restore_live_registers(sasm, save_fpu_registers);
1408 1408 }
1409 1409 break;
1410 1410
1411 1411 case monitorexit_nofpu_id:
1412 1412 save_fpu_registers = false;
1413 1413 // fall through
1414 1414 case monitorexit_id:
1415 1415 {
1416 1416 StubFrame f(sasm, "monitorexit", dont_gc_arguments);
1417 1417 OopMap* map = save_live_registers(sasm, 2, save_fpu_registers);
1418 1418
1419 1419 // Called with store_parameter, not the C ABI
1420 1420
1421 1421 f.load_argument(0, rax); // rax: lock address
1422 1422
1423 1423 // note: really a leaf routine, but it must set up the last Java sp
1424 1424 //       => use call_RT for now (speed could be improved by
1425 1425 //       doing the last Java sp setup manually)
1426 1426 int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, monitorexit), rax);
1427 1427
1428 1428 oop_maps = new OopMapSet();
1429 1429 oop_maps->add_gc_map(call_offset, map);
1430 1430 restore_live_registers(sasm, save_fpu_registers);
1431 1431
1432 1432 }
1433 1433 break;
1434 1434
1435 1435 case access_field_patching_id:
1436 1436 { StubFrame f(sasm, "access_field_patching", dont_gc_arguments);
1437 1437 // we should set up register map
1438 1438 oop_maps = generate_patching(sasm, CAST_FROM_FN_PTR(address, access_field_patching));
1439 1439 }
1440 1440 break;
1441 1441
1442 1442 case load_klass_patching_id:
1443 1443 { StubFrame f(sasm, "load_klass_patching", dont_gc_arguments);
1444 1444 // we should set up register map
1445 1445 oop_maps = generate_patching(sasm, CAST_FROM_FN_PTR(address, move_klass_patching));
1446 1446 }
1447 1447 break;
1448 1448
1449 1449 case jvmti_exception_throw_id:
1450 1450 { // rax: exception oop
1451 1451 StubFrame f(sasm, "jvmti_exception_throw", dont_gc_arguments);
1452 1452 // Preserve all registers across this potentially blocking call
1453 1453 const int num_rt_args = 2; // thread, exception oop
1454 1454 OopMap* map = save_live_registers(sasm, num_rt_args);
1455 1455 int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, Runtime1::post_jvmti_exception_throw), rax);
1456 1456 oop_maps = new OopMapSet();
1457 1457 oop_maps->add_gc_map(call_offset, map);
1458 1458 restore_live_registers(sasm);
1459 1459 }
1460 1460 break;
1461 1461
1462 1462 case dtrace_object_alloc_id:
1463 1463 { // rax: object
1464 1464 StubFrame f(sasm, "dtrace_object_alloc", dont_gc_arguments);
1465 1465 // we can't GC here, so skip the oop map, but make sure that all
1466 1466 // the live registers get saved.
1467 1467 save_live_registers(sasm, 1);
1468 1468
1469 1469 __ NOT_LP64(push(rax)) LP64_ONLY(mov(c_rarg0, rax));
1470 1470 __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_object_alloc)));
1471 1471 NOT_LP64(__ pop(rax));
1472 1472
1473 1473 restore_live_registers(sasm);
1474 1474 }
1475 1475 break;
1476 1476
1477 1477 case fpu2long_stub_id:
1478 1478 {
1479 1479 // rax and rdx are destroyed, but they should be free since the result is returned in them
1480 1480 // preserve rsi and rcx
1481 1481 __ push(rsi);
1482 1482 __ push(rcx);
1483 1483 LP64_ONLY(__ push(rdx);)
1484 1484
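// Java d2l semantics: NaN converts to 0 and out-of-range values
// saturate to min_jlong/max_jlong, while x87 fistp yields the
// "integer indefinite" value (min_jlong) for both NaN and overflow.
// So NaN is filtered out first, and an indefinite result is later
// disambiguated by the sign of the input.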
1485 1485 // check for NaN
1486 1486 Label return0, do_return, return_min_jlong, do_convert;
1487 1487
1488 1488 Address value_high_word(rsp, wordSize + 4);
1489 1489 Address value_low_word(rsp, wordSize);
1490 1490 Address result_high_word(rsp, 3*wordSize + 4);
1491 1491 Address result_low_word(rsp, 3*wordSize);
1492 1492
1493 1493 __ subptr(rsp, 32); // more than enough on 32bit
1494 1494 __ fst_d(value_low_word);
1495 1495 __ movl(rax, value_high_word);
1496 1496 __ andl(rax, 0x7ff00000);
1497 1497 __ cmpl(rax, 0x7ff00000);
1498 1498 __ jcc(Assembler::notEqual, do_convert);
1499 1499 __ movl(rax, value_high_word);
1500 1500 __ andl(rax, 0xfffff);
1501 1501 __ orl(rax, value_low_word);
1502 1502 __ jcc(Assembler::notZero, return0);
1503 1503
1504 1504 __ bind(do_convert);
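// set the x87 rounding-control bits (10-11) to 11b = truncate
// (round toward zero) as the Java narrowing conversion requires,
// convert, then restore the saved control word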
1505 1505 __ fnstcw(Address(rsp, 0));
1506 1506 __ movzwl(rax, Address(rsp, 0));
1507 1507 __ orl(rax, 0xc00);
1508 1508 __ movw(Address(rsp, 2), rax);
1509 1509 __ fldcw(Address(rsp, 2));
1510 1510 __ fwait();
1511 1511 __ fistp_d(result_low_word);
1512 1512 __ fldcw(Address(rsp, 0));
1513 1513 __ fwait();
1514 1514 // This gets the entire long in rax on 64bit
1515 1515 __ movptr(rax, result_low_word);
1516 1516 // testing of high bits
1517 1517 __ movl(rdx, result_high_word);
1518 1518 __ mov(rcx, rax);
1519 1519 // no-op on 32-bit; on 64-bit this 32-bit xor zero-extends rcx so only the low word of the result feeds the min_jlong test below
1520 1520 __ xorl(rcx, 0x0);
1521 1521 __ movl(rsi, 0x80000000);
1522 1522 __ xorl(rsi, rdx);
1523 1523 __ orl(rcx, rsi);
1524 1524 __ jcc(Assembler::notEqual, do_return);
1525 1525 __ fldz();
1526 1526 __ fcomp_d(value_low_word);
1527 1527 __ fnstsw_ax();
1528 1528 #ifdef _LP64
1529 1529 __ testl(rax, 0x4100); // ZF & CF == 0
1530 1530 __ jcc(Assembler::equal, return_min_jlong);
1531 1531 #else
1532 1532 __ sahf();
1533 1533 __ jcc(Assembler::above, return_min_jlong);
1534 1534 #endif // _LP64
1535 1535 // return max_jlong
1536 1536 #ifndef _LP64
1537 1537 __ movl(rdx, 0x7fffffff);
1538 1538 __ movl(rax, 0xffffffff);
1539 1539 #else
1540 1540 __ mov64(rax, CONST64(0x7fffffffffffffff));
1541 1541 #endif // _LP64
1542 1542 __ jmp(do_return);
1543 1543
1544 1544 __ bind(return_min_jlong);
1545 1545 #ifndef _LP64
1546 1546 __ movl(rdx, 0x80000000);
1547 1547 __ xorl(rax, rax);
1548 1548 #else
1549 1549 __ mov64(rax, CONST64(0x8000000000000000));
1550 1550 #endif // _LP64
1551 1551 __ jmp(do_return);
1552 1552
1553 1553 __ bind(return0);
1554 1554 __ fpop();
1555 1555 #ifndef _LP64
1556 1556 __ xorptr(rdx,rdx);
1557 1557 __ xorptr(rax,rax);
1558 1558 #else
1559 1559 __ xorptr(rax, rax);
1560 1560 #endif // _LP64
1561 1561
1562 1562 __ bind(do_return);
1563 1563 __ addptr(rsp, 32);
1564 1564 LP64_ONLY(__ pop(rdx);)
1565 1565 __ pop(rcx);
1566 1566 __ pop(rsi);
1567 1567 __ ret(0);
1568 1568 }
1569 1569 break;
1570 1570
1571 1571 #ifndef SERIALGC
1572 1572 case g1_pre_barrier_slow_id:
1573 1573 {
1574 1574 StubFrame f(sasm, "g1_pre_barrier", dont_gc_arguments);
1575 1575 // arg0 : previous value of memory
1576 1576
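// G1 marks with a snapshot-at-the-beginning (SATB) invariant: this
// pre-barrier slow path enqueues the value the field held before the
// store on a thread-local SATB buffer for the concurrent marker.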
1577 1577 BarrierSet* bs = Universe::heap()->barrier_set();
1578 1578 if (bs->kind() != BarrierSet::G1SATBCTLogging) {
1579 1579 __ movptr(rax, (int)id);
1580 1580 __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, unimplemented_entry), rax);
1581 1581 __ should_not_reach_here();
1582 1582 break;
1583 1583 }
1584 1584
1585 1585 __ push(rax);
1586 1586 __ push(rdx);
1587 1587
1588 1588 const Register pre_val = rax;
1589 1589 const Register thread = NOT_LP64(rax) LP64_ONLY(r15_thread);
1590 1590 const Register tmp = rdx;
1591 1591
1592 1592 NOT_LP64(__ get_thread(thread);)
1593 1593
1594 1594 Address in_progress(thread, in_bytes(JavaThread::satb_mark_queue_offset() +
1595 1595 PtrQueue::byte_offset_of_active()));
1596 1596
1597 1597 Address queue_index(thread, in_bytes(JavaThread::satb_mark_queue_offset() +
1598 1598 PtrQueue::byte_offset_of_index()));
1599 1599 Address buffer(thread, in_bytes(JavaThread::satb_mark_queue_offset() +
1600 1600 PtrQueue::byte_offset_of_buf()));
1601 1601
1602 1602
1603 1603 Label done;
1604 1604 Label runtime;
1605 1605
1606 1606 // Can we store original value in the thread's buffer?
1607 1607
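// the queue index counts down in bytes; an index of 0 means the
// buffer is full and the slow path must call into the runtime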
1608 1608 LP64_ONLY(__ movslq(tmp, queue_index);)
1609 1609 #ifdef _LP64
1610 1610 __ cmpq(tmp, 0);
1611 1611 #else
1612 1612 __ cmpl(queue_index, 0);
1613 1613 #endif
1614 1614 __ jcc(Assembler::equal, runtime);
1615 1615 #ifdef _LP64
1616 1616 __ subq(tmp, wordSize);
1617 1617 __ movl(queue_index, tmp);
1618 1618 __ addq(tmp, buffer);
1619 1619 #else
1620 1620 __ subl(queue_index, wordSize);
1621 1621 __ movl(tmp, buffer);
1622 1622 __ addl(tmp, queue_index);
1623 1623 #endif
1624 1624
1625 1625 // prev_val (rax)
1626 1626 f.load_argument(0, pre_val);
1627 1627 __ movptr(Address(tmp, 0), pre_val);
1628 1628 __ jmp(done);
1629 1629
1630 1630 __ bind(runtime);
1631 1631 // load the pre-value
1632 1632 __ push(rcx);
1633 1633 f.load_argument(0, rcx);
1634 1634 __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_pre), rcx, thread);
1635 1635 __ pop(rcx);
1636 1636
1637 1637 __ bind(done);
1638 1638 __ pop(rdx);
1639 1639 __ pop(rax);
1640 1640 }
1641 1641 break;
1642 1642
1643 1643 case g1_post_barrier_slow_id:
1644 1644 {
1645 1645 StubFrame f(sasm, "g1_post_barrier", dont_gc_arguments);
1646 1646
1647 1647
1648 1648 // arg0: store_address
1649 1649 Address store_addr(rbp, 2*BytesPerWord);
1650 1650
1651 1651 BarrierSet* bs = Universe::heap()->barrier_set();
1652 1652 CardTableModRefBS* ct = (CardTableModRefBS*)bs;
1653 1653 Label done;
1654 1654 Label runtime;
1655 1655
1656 1656 // At this point we know new_value is non-NULL and that it crosses regions.
1657 1657 // Must check to see if card is already dirty
1658 1658
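// the card for an address is byte_map_base[addr >> card_shift]; a zero
// byte means the card is already dirty and there is nothing more to do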
1659 1659 const Register thread = NOT_LP64(rax) LP64_ONLY(r15_thread);
1660 1660
1661 1661 Address queue_index(thread, in_bytes(JavaThread::dirty_card_queue_offset() +
1662 1662 PtrQueue::byte_offset_of_index()));
1663 1663 Address buffer(thread, in_bytes(JavaThread::dirty_card_queue_offset() +
1664 1664 PtrQueue::byte_offset_of_buf()));
1665 1665
1666 1666 __ push(rax);
1667 1667 __ push(rdx);
1668 1668
1669 1669 NOT_LP64(__ get_thread(thread);)
1670 1670 ExternalAddress cardtable((address)ct->byte_map_base);
1671 1671 assert(sizeof(*ct->byte_map_base) == sizeof(jbyte), "adjust this code");
1672 1672
1673 1673 const Register card_addr = rdx;
1674 1674 #ifdef _LP64
1675 1675 const Register tmp = rscratch1;
1676 1676 f.load_argument(0, card_addr);
1677 1677 __ shrq(card_addr, CardTableModRefBS::card_shift);
1678 1678 __ lea(tmp, cardtable);
1679 1679 // get the address of the card
1680 1680 __ addq(card_addr, tmp);
1681 1681 #else
1682 1682 const Register card_index = rdx;
1683 1683 f.load_argument(0, card_index);
1684 1684 __ shrl(card_index, CardTableModRefBS::card_shift);
1685 1685
1686 1686 Address index(noreg, card_index, Address::times_1);
1687 1687 __ leal(card_addr, __ as_Address(ArrayAddress(cardtable, index)));
1688 1688 #endif
1689 1689
1690 1690 __ cmpb(Address(card_addr, 0), 0);
1691 1691 __ jcc(Assembler::equal, done);
1692 1692
1693 1693 // Storing a region-crossing non-NULL value and the card is clean:
1694 1694 // dirty the card and log it.
1695 1695
1696 1696 __ movb(Address(card_addr, 0), 0);
1697 1697
1698 1698 __ cmpl(queue_index, 0);
1699 1699 __ jcc(Assembler::equal, runtime);
1700 1700 __ subl(queue_index, wordSize);
1701 1701
1702 1702 const Register buffer_addr = rbx;
1703 1703 __ push(rbx);
1704 1704
1705 1705 __ movptr(buffer_addr, buffer);
1706 1706
1707 1707 #ifdef _LP64
1708 1708 __ movslq(rscratch1, queue_index);
1709 1709 __ addptr(buffer_addr, rscratch1);
1710 1710 #else
1711 1711 __ addptr(buffer_addr, queue_index);
1712 1712 #endif
1713 1713 __ movptr(Address(buffer_addr, 0), card_addr);
1714 1714
1715 1715 __ pop(rbx);
1716 1716 __ jmp(done);
1717 1717
1718 1718 __ bind(runtime);
1719 1719 NOT_LP64(__ push(rcx);)
1720 1720 __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_post), card_addr, thread);
1721 1721 NOT_LP64(__ pop(rcx);)
1722 1722
1723 1723 __ bind(done);
1724 1724 __ pop(rdx);
1725 1725 __ pop(rax);
1726 1726
1727 1727 }
1728 1728 break;
1729 1729 #endif // !SERIALGC
1730 1730
1731 1731 default:
1732 1732 { StubFrame f(sasm, "unimplemented entry", dont_gc_arguments);
1733 1733 __ movptr(rax, (int)id);
1734 1734 __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, unimplemented_entry), rax);
1735 1735 __ should_not_reach_here();
1736 1736 }
1737 1737 break;
1738 1738 }
1739 1739 return oop_maps;
1740 1740 }
1741 1741
1742 1742 #undef __