--- old/src/cpu/x86/vm/templateInterpreter_x86_32.cpp
+++ new/src/cpu/x86/vm/templateInterpreter_x86_32.cpp
/*
 * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/assembler.hpp"
#include "interpreter/bytecodeHistogram.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/interpreterGenerator.hpp"
#include "interpreter/interpreterRuntime.hpp"
#include "interpreter/templateTable.hpp"
#include "oops/arrayOop.hpp"
#include "oops/methodDataOop.hpp"
#include "oops/methodOop.hpp"
#include "oops/oop.inline.hpp"
#include "prims/jvmtiExport.hpp"
#include "prims/jvmtiThreadState.hpp"
#include "runtime/arguments.hpp"
#include "runtime/deoptimization.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/synchronizer.hpp"
#include "runtime/timer.hpp"
#include "runtime/vframeArray.hpp"
#include "utilities/debug.hpp"

#define __ _masm->


#ifndef CC_INTERP
const int method_offset = frame::interpreter_frame_method_offset * wordSize;
const int bci_offset    = frame::interpreter_frame_bcx_offset    * wordSize;
const int locals_offset = frame::interpreter_frame_locals_offset * wordSize;

//------------------------------------------------------------------------------------------------------------------------

address TemplateInterpreterGenerator::generate_StackOverflowError_handler() {
  address entry = __ pc();

  // Note: There should be a minimal interpreter frame set up when stack
  // overflow occurs since we check explicitly for it now.
  //
#ifdef ASSERT
  { Label L;
    __ lea(rax, Address(rbp,
                frame::interpreter_frame_monitor_block_top_offset * wordSize));
    __ cmpptr(rax, rsp);              // rax = maximal rsp for current rbp (stack grows negative)
    __ jcc(Assembler::aboveEqual, L); // check if frame is complete
    __ stop ("interpreter frame not set up");
    __ bind(L);
  }
#endif // ASSERT
  // Restore bcp under the assumption that the current frame is still
  // interpreted
  __ restore_bcp();

  // expression stack must be empty before entering the VM if an exception
  // happened
  __ empty_expression_stack();
  __ empty_FPU_stack();
  // throw exception
  __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_StackOverflowError));
  return entry;
}

address TemplateInterpreterGenerator::generate_ArrayIndexOutOfBounds_handler(const char* name) {
  address entry = __ pc();
  // expression stack must be empty before entering the VM if an exception happened
  __ empty_expression_stack();
  __ empty_FPU_stack();
  // setup parameters
  // ??? convention: expect the aberrant index in register rbx
  __ lea(rax, ExternalAddress((address)name));
  __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_ArrayIndexOutOfBoundsException), rax, rbx);
  return entry;
}

address TemplateInterpreterGenerator::generate_ClassCastException_handler() {
  address entry = __ pc();
  // object is at TOS
  __ pop(rax);
  // expression stack must be empty before entering the VM if an exception
  // happened
  __ empty_expression_stack();
  __ empty_FPU_stack();
  __ call_VM(noreg,
             CAST_FROM_FN_PTR(address,
                              InterpreterRuntime::throw_ClassCastException),
             rax);
  return entry;
}

// Arguments are: required type at TOS+4, failing object (or NULL) at TOS.
address TemplateInterpreterGenerator::generate_WrongMethodType_handler() {
  address entry = __ pc();

  __ pop(rbx);   // actual failing object is at TOS
  __ pop(rax);   // required type is at TOS+4

  __ verify_oop(rbx);
  __ verify_oop(rax);

  // Various method handle types use interpreter registers as temps.
  __ restore_bcp();
  __ restore_locals();

  // Expression stack must be empty before entering the VM for an exception.
  __ empty_expression_stack();
  __ empty_FPU_stack();
  __ call_VM(noreg,
             CAST_FROM_FN_PTR(address,
                              InterpreterRuntime::throw_WrongMethodTypeException),
             // pass required type, failing object (or NULL)
             rax, rbx);
  return entry;
}


address TemplateInterpreterGenerator::generate_exception_handler_common(const char* name, const char* message, bool pass_oop) {
  assert(!pass_oop || message == NULL, "either oop or message but not both");
  address entry = __ pc();
  if (pass_oop) {
    // object is at TOS
    __ pop(rbx);
  }
  // expression stack must be empty before entering the VM if an exception happened
  __ empty_expression_stack();
  __ empty_FPU_stack();
  // setup parameters
  __ lea(rax, ExternalAddress((address)name));
  if (pass_oop) {
    __ call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::create_klass_exception), rax, rbx);
  } else {
    if (message != NULL) {
      __ lea(rbx, ExternalAddress((address)message));
    } else {
      __ movptr(rbx, NULL_WORD);
    }
    __ call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::create_exception), rax, rbx);
  }
  // throw exception
  __ jump(ExternalAddress(Interpreter::throw_exception_entry()));
  return entry;
}


address TemplateInterpreterGenerator::generate_continuation_for(TosState state) {
  address entry = __ pc();
  // NULL last_sp until next java call
  __ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), NULL_WORD);
  __ dispatch_next(state);
  return entry;
}


address TemplateInterpreterGenerator::generate_return_entry_for(TosState state, int step) {
  TosState incoming_state = state;
  address entry = __ pc();

#ifdef COMPILER2
  // The FPU stack is clean if UseSSE >= 2 but must be cleaned in other cases
  if ((incoming_state == ftos && UseSSE < 1) || (incoming_state == dtos && UseSSE < 2)) {
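    // keep ST0 (which holds the float/double result in these configurations)
    // and free the remaining seven x87 stack registers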
    for (int i = 1; i < 8; i++) {
      __ ffree(i);
    }
  } else if (UseSSE < 2) {
    __ empty_FPU_stack();
  }
#endif
  if ((incoming_state == ftos && UseSSE < 1) || (incoming_state == dtos && UseSSE < 2)) {
    __ MacroAssembler::verify_FPU(1, "generate_return_entry_for compiled");
  } else {
    __ MacroAssembler::verify_FPU(0, "generate_return_entry_for compiled");
  }

  // In SSE mode, the interpreter returns FP results in xmm0, but they need
  // to end up back on the FPU stack so the interpreter can operate on them.
  if (incoming_state == ftos && UseSSE >= 1) {
    __ subptr(rsp, wordSize);
    __ movflt(Address(rsp, 0), xmm0);
    __ fld_s(Address(rsp, 0));
    __ addptr(rsp, wordSize);
  } else if (incoming_state == dtos && UseSSE >= 2) {
    __ subptr(rsp, 2*wordSize);
    __ movdbl(Address(rsp, 0), xmm0);
    __ fld_d(Address(rsp, 0));
    __ addptr(rsp, 2*wordSize);
  }

  __ MacroAssembler::verify_FPU(state == ftos || state == dtos ? 1 : 0, "generate_return_entry_for in interpreter");

  // Restore stack bottom in case i2c adjusted stack
  __ movptr(rsp, Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize));
  // and NULL it as a marker that rsp is now tos until next java call
  __ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), NULL_WORD);

  __ restore_bcp();
  __ restore_locals();

  Label L_got_cache, L_giant_index;
  if (EnableInvokeDynamic) {
    __ cmpb(Address(rsi, 0), Bytecodes::_invokedynamic);
    __ jcc(Assembler::equal, L_giant_index);
  }
  __ get_cache_and_index_at_bcp(rbx, rcx, 1, sizeof(u2));
  __ bind(L_got_cache);
  __ movl(rbx, Address(rbx, rcx,
                       Address::times_ptr, constantPoolCacheOopDesc::base_offset() +
                       ConstantPoolCacheEntry::flags_offset()));
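  // The low byte of the flags word holds the callee's parameter size in
  // stack elements; it is used below to pop the arguments pushed for the call.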
  __ andptr(rbx, 0xFF);
  __ lea(rsp, Address(rsp, rbx, Interpreter::stackElementScale()));
  __ dispatch_next(state, step);

  // out of the main line of code...
  if (EnableInvokeDynamic) {
    __ bind(L_giant_index);
    __ get_cache_and_index_at_bcp(rbx, rcx, 1, sizeof(u4));
    __ jmp(L_got_cache);
  }

  return entry;
}


address TemplateInterpreterGenerator::generate_deopt_entry_for(TosState state, int step) {
  address entry = __ pc();

  // In SSE mode, FP results are in xmm0
  if (state == ftos && UseSSE > 0) {
    __ subptr(rsp, wordSize);
    __ movflt(Address(rsp, 0), xmm0);
    __ fld_s(Address(rsp, 0));
    __ addptr(rsp, wordSize);
  } else if (state == dtos && UseSSE >= 2) {
    __ subptr(rsp, 2*wordSize);
    __ movdbl(Address(rsp, 0), xmm0);
    __ fld_d(Address(rsp, 0));
    __ addptr(rsp, 2*wordSize);
  }

  __ MacroAssembler::verify_FPU(state == ftos || state == dtos ? 1 : 0, "generate_deopt_entry_for in interpreter");

  // The stack is not extended by deopt but we must NULL last_sp as this
  // entry is like a "return".
  __ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), NULL_WORD);
  __ restore_bcp();
  __ restore_locals();
  // handle exceptions
  { Label L;
    const Register thread = rcx;
    __ get_thread(thread);
    __ cmpptr(Address(thread, Thread::pending_exception_offset()), (int32_t)NULL_WORD);
    __ jcc(Assembler::zero, L);
    __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_pending_exception));
    __ should_not_reach_here();
    __ bind(L);
  }
  __ dispatch_next(state, step);
  return entry;
}


int AbstractInterpreter::BasicType_as_index(BasicType type) {
  int i = 0;
  switch (type) {
    case T_BOOLEAN: i = 0; break;
    case T_CHAR   : i = 1; break;
    case T_BYTE   : i = 2; break;
    case T_SHORT  : i = 3; break;
    case T_INT    : // fall through
    case T_LONG   : // fall through
    case T_VOID   : i = 4; break;
    case T_FLOAT  : i = 5; break;  // have to treat float and double separately for SSE
    case T_DOUBLE : i = 6; break;
    case T_OBJECT : // fall through
    case T_ARRAY  : i = 7; break;
    default       : ShouldNotReachHere();
  }
  assert(0 <= i && i < AbstractInterpreter::number_of_result_handlers, "index out of bounds");
  return i;
}


address TemplateInterpreterGenerator::generate_result_handler_for(BasicType type) {
  address entry = __ pc();
  switch (type) {
    case T_BOOLEAN: __ c2bool(rax);            break;
    case T_CHAR   : __ andptr(rax, 0xFFFF);    break;
    case T_BYTE   : __ sign_extend_byte (rax); break;
    case T_SHORT  : __ sign_extend_short(rax); break;
    case T_INT    : /* nothing to do */        break;
    case T_DOUBLE :
    case T_FLOAT  :
      { const Register t = InterpreterRuntime::SignatureHandlerGenerator::temp();
        __ pop(t);                             // remove return address first
        // Must return a result for interpreter or compiler. In SSE
        // mode, results are returned in xmm0 and the FPU stack must
        // be empty.
        if (type == T_FLOAT && UseSSE >= 1) {
          // Load ST0
          __ fld_d(Address(rsp, 0));
          // Store as float and empty fpu stack
          __ fstp_s(Address(rsp, 0));
          // and reload
          __ movflt(xmm0, Address(rsp, 0));
        } else if (type == T_DOUBLE && UseSSE >= 2) {
          __ movdbl(xmm0, Address(rsp, 0));
        } else {
          // restore ST0
          __ fld_d(Address(rsp, 0));
        }
        // and pop the temp
        __ addptr(rsp, 2 * wordSize);
        __ push(t);                            // restore return address
      }
      break;
    case T_OBJECT :
      // retrieve result from frame
      __ movptr(rax, Address(rbp, frame::interpreter_frame_oop_temp_offset*wordSize));
      // and verify it
      __ verify_oop(rax);
      break;
    default       : ShouldNotReachHere();
  }
  __ ret(0);                                   // return from result handler
  return entry;
}

address TemplateInterpreterGenerator::generate_safept_entry_for(TosState state, address runtime_entry) {
  address entry = __ pc();
  __ push(state);
  __ call_VM(noreg, runtime_entry);
  __ dispatch_via(vtos, Interpreter::_normal_table.table_for(vtos));
  return entry;
}


// Helpers for commoning out cases in the various types of method entries.
//

// increment invocation count & check for overflow
//
// Note: checking for a negative value instead of overflow
//       so we have a 'sticky' overflow test
//
// rbx: method
// rcx: invocation counter
//
void InterpreterGenerator::generate_counter_incr(Label* overflow, Label* profile_method, Label* profile_method_continue) {
  const Address invocation_counter(rbx, in_bytes(methodOopDesc::invocation_counter_offset()) +
                                        in_bytes(InvocationCounter::counter_offset()));
  // Note: In tiered we increment either counters in methodOop or in MDO depending on whether we're profiling or not.
  if (TieredCompilation) {
    int increment = InvocationCounter::count_increment;
    int mask = ((1 << Tier0InvokeNotifyFreqLog) - 1) << InvocationCounter::count_shift;
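    // The count is stored above the status bits (count_shift), so this mask
    // makes increment_mask_and_jump take the overflow branch once every
    // 2^Tier0InvokeNotifyFreqLog invocations.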
    Label no_mdo, done;
    if (ProfileInterpreter) {
      // Are we profiling?
      __ movptr(rax, Address(rbx, methodOopDesc::method_data_offset()));
      __ testptr(rax, rax);
      __ jccb(Assembler::zero, no_mdo);
      // Increment counter in the MDO
      const Address mdo_invocation_counter(rax, in_bytes(methodDataOopDesc::invocation_counter_offset()) +
                                                in_bytes(InvocationCounter::counter_offset()));
      __ increment_mask_and_jump(mdo_invocation_counter, increment, mask, rcx, false, Assembler::zero, overflow);
      __ jmpb(done);
    }
    __ bind(no_mdo);
    // Increment counter in methodOop (we don't need to load it, it's in rcx).
    __ increment_mask_and_jump(invocation_counter, increment, mask, rcx, true, Assembler::zero, overflow);
    __ bind(done);
  } else {
    const Address backedge_counter(rbx, methodOopDesc::backedge_counter_offset() +
                                        InvocationCounter::counter_offset());

    if (ProfileInterpreter) { // %%% Merge this into methodDataOop
      __ incrementl(Address(rbx, methodOopDesc::interpreter_invocation_counter_offset()));
    }
    // Update standard invocation counters
    __ movl(rax, backedge_counter);                     // load backedge counter

    __ incrementl(rcx, InvocationCounter::count_increment);
    __ andl(rax, InvocationCounter::count_mask_value);  // mask out the status bits

    __ movl(invocation_counter, rcx);                   // save invocation count
    __ addl(rcx, rax);                                  // add both counters
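    // rcx now holds invocation count + backedge count; the profiling and
    // compilation limits below are compared against this combined count.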

    // profile_method is non-NULL only for interpreted methods, so
    // profile_method != NULL implies !native_call.
    // The BytecodeInterpreter only calls here for natives, so that code is elided.

    if (ProfileInterpreter && profile_method != NULL) {
      // Test to see if we should create a method data oop
      __ cmp32(rcx,
               ExternalAddress((address)&InvocationCounter::InterpreterProfileLimit));
      __ jcc(Assembler::less, *profile_method_continue);

      // if no method data exists, go to profile_method
      __ test_method_data_pointer(rax, *profile_method);
    }

    __ cmp32(rcx,
             ExternalAddress((address)&InvocationCounter::InterpreterInvocationLimit));
    __ jcc(Assembler::aboveEqual, *overflow);
  }
}

void InterpreterGenerator::generate_counter_overflow(Label* do_continue) {

  // Asm interpreter on entry
  // rdi - locals
  // rsi - bcp
  // rbx - method
  // rdx - cpool
  // rbp - interpreter frame

  // C++ interpreter on entry
  // rsi - new interpreter state pointer
  // rbp - interpreter frame pointer
  // rbx - method

  // On return (i.e. jump to entry_point) [ back to invocation of interpreter ]
  // rbx - method
  // rcx - rcvr (assuming there is one)
  // top of stack return address of interpreter caller
  // rsp - sender_sp

  // C++ interpreter only
  // rsi - previous interpreter state pointer

  const Address size_of_parameters(rbx, methodOopDesc::size_of_parameters_offset());

  // InterpreterRuntime::frequency_counter_overflow takes one argument
  // indicating if the counter overflow occurs at a backwards branch (non-NULL bcp).
  // The call returns the address of the verified entry point for the method or NULL
  // if the compilation did not complete (either went background or bailed out).
  __ movptr(rax, (intptr_t)false);
  __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::frequency_counter_overflow), rax);

  __ movptr(rbx, Address(rbp, method_offset));  // restore methodOop

  // Preserve invariant that rsi/rdi contain bcp/locals of sender frame
  // and jump to the interpreted entry.
  __ jmp(*do_continue, relocInfo::none);

}

void InterpreterGenerator::generate_stack_overflow_check(void) {
  // See if we've got enough room on the stack for locals plus overhead.
  // The expression stack grows down incrementally, so the normal guard
  // page mechanism will work for that.
  //
  // Registers live on entry:
  //
  // Asm interpreter
  // rdx: number of additional locals this frame needs (what we must check)
  // rbx: methodOop

  // destroyed on exit
  // rax

  // NOTE: the additional locals are always pushed as well (this wasn't obvious
  // in generate_method_entry), so the guard should work for them too.
  //

  // monitor entry size: see picture of stack set (generate_method_entry) and frame_x86.hpp
  const int entry_size = frame::interpreter_frame_monitor_size() * wordSize;

  // total overhead size: entry_size + (saved rbp through expr stack bottom).
  // be sure to change this if you add/subtract anything to/from the overhead area
  const int overhead_size = -(frame::interpreter_frame_initial_sp_offset*wordSize) + entry_size;

  const int page_size = os::vm_page_size();

  Label after_frame_check;

  // see if the frame is greater than one page in size. If so,
  // then we need to verify there is enough stack space remaining
  // for the additional locals.
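  // rdx counts stack elements, so convert the one-page byte budget to
  // elements before comparing.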
  __ cmpl(rdx, (page_size - overhead_size)/Interpreter::stackElementSize);
  __ jcc(Assembler::belowEqual, after_frame_check);

  // compute rsp as if this were going to be the last frame on
  // the stack before the red zone

  Label after_frame_check_pop;

  __ push(rsi);

  const Register thread = rsi;

  __ get_thread(thread);

  const Address stack_base(thread, Thread::stack_base_offset());
  const Address stack_size(thread, Thread::stack_size_offset());

  // locals + overhead, in bytes
  __ lea(rax, Address(noreg, rdx, Interpreter::stackElementScale(), overhead_size));

#ifdef ASSERT
  Label stack_base_okay, stack_size_okay;
  // verify that thread stack base is non-zero
  __ cmpptr(stack_base, (int32_t)NULL_WORD);
  __ jcc(Assembler::notEqual, stack_base_okay);
  __ stop("stack base is zero");
  __ bind(stack_base_okay);
  // verify that thread stack size is non-zero
  __ cmpptr(stack_size, 0);
  __ jcc(Assembler::notEqual, stack_size_okay);
  __ stop("stack size is zero");
  __ bind(stack_size_okay);
#endif

  // Add stack base to locals and subtract stack size
  __ addptr(rax, stack_base);
  __ subptr(rax, stack_size);

  // Use the maximum number of pages we might bang.
  const int max_pages = StackShadowPages > (StackRedPages+StackYellowPages) ? StackShadowPages :
                                                                              (StackRedPages+StackYellowPages);
  __ addptr(rax, max_pages * page_size);

  // check against the current stack bottom
  __ cmpptr(rsp, rax);
  __ jcc(Assembler::above, after_frame_check_pop);

  __ pop(rsi);  // get saved bcp / (C++ prev state)

  __ pop(rax);  // get return address
  __ jump(ExternalAddress(Interpreter::throw_StackOverflowError_entry()));

  // all done with frame size check
  __ bind(after_frame_check_pop);
  __ pop(rsi);

  __ bind(after_frame_check);
}

// Allocate monitor and lock method (asm interpreter)
// rbx - methodOop
//
void InterpreterGenerator::lock_method(void) {
  // synchronize method
  const Address access_flags      (rbx, methodOopDesc::access_flags_offset());
  const Address monitor_block_top (rbp, frame::interpreter_frame_monitor_block_top_offset * wordSize);
  const int entry_size = frame::interpreter_frame_monitor_size() * wordSize;

#ifdef ASSERT
  { Label L;
    __ movl(rax, access_flags);
    __ testl(rax, JVM_ACC_SYNCHRONIZED);
    __ jcc(Assembler::notZero, L);
    __ stop("method doesn't need synchronization");
    __ bind(L);
  }
#endif // ASSERT
  // get synchronization object
  { Label done;
    const int mirror_offset = klassOopDesc::klass_part_offset_in_bytes() + Klass::java_mirror_offset_in_bytes();
    __ movl(rax, access_flags);
    __ testl(rax, JVM_ACC_STATIC);
    __ movptr(rax, Address(rdi, Interpreter::local_offset_in_bytes(0)));  // get receiver (assume this is frequent case)
    __ jcc(Assembler::zero, done);
    __ movptr(rax, Address(rbx, methodOopDesc::constants_offset()));
    __ movptr(rax, Address(rax, constantPoolOopDesc::pool_holder_offset_in_bytes()));
    __ movptr(rax, Address(rax, mirror_offset));
    __ bind(done);
  }
  // add space for monitor & lock
  __ subptr(rsp, entry_size);                                            // add space for a monitor entry
  __ movptr(monitor_block_top, rsp);                                     // set new monitor block top
  __ movptr(Address(rsp, BasicObjectLock::obj_offset_in_bytes()), rax);  // store object
  __ mov(rdx, rsp);                                                      // object address
  __ lock_object(rdx);
}

//
// Generate a fixed interpreter frame. The setup is identical for interpreted
// methods and for native methods, hence the shared code.

void TemplateInterpreterGenerator::generate_fixed_frame(bool native_call) {
  // initialize fixed part of activation frame
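  // The pushes below build the frame from rbp downward: sender sp, last_sp
  // (NULL), methodOop, mdp, constant pool cache, locals pointer, bcp, and
  // finally the expression stack bottom; the resulting slot layout is
  // mirrored by the interpreter_frame_*_offset constants in frame_x86.hpp.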
  __ push(rax);                                      // save return address
  __ enter();                                        // save old & set new rbp


  __ push(rsi);                                      // set sender sp
  __ push((int32_t)NULL_WORD);                       // leave last_sp as null
  __ movptr(rsi, Address(rbx, methodOopDesc::const_offset()));    // get constMethodOop
  __ lea(rsi, Address(rsi, constMethodOopDesc::codes_offset()));  // get codebase
  __ push(rbx);                                      // save methodOop
  if (ProfileInterpreter) {
    Label method_data_continue;
    __ movptr(rdx, Address(rbx, in_bytes(methodOopDesc::method_data_offset())));
    __ testptr(rdx, rdx);
    __ jcc(Assembler::zero, method_data_continue);
    __ addptr(rdx, in_bytes(methodDataOopDesc::data_offset()));
    __ bind(method_data_continue);
    __ push(rdx);                                    // set the mdp (method data pointer)
  } else {
    __ push(0);
  }

  __ movptr(rdx, Address(rbx, methodOopDesc::constants_offset()));
  __ movptr(rdx, Address(rdx, constantPoolOopDesc::cache_offset_in_bytes()));
  __ push(rdx);                                      // set constant pool cache
  __ push(rdi);                                      // set locals pointer
  if (native_call) {
    __ push(0);                                      // no bcp
  } else {
    __ push(rsi);                                    // set bcp
  }
  __ push(0);                                        // reserve word for pointer to expression stack bottom
  __ movptr(Address(rsp, 0), rsp);                   // set expression stack bottom
}

// End of helpers

//
// Various method entries
//------------------------------------------------------------------------------------------------------------------------
//
//

// Call an accessor method (assuming it is resolved, otherwise drop into vanilla (slow path) entry)

address InterpreterGenerator::generate_accessor_entry(void) {

  // rbx: methodOop
  // rcx: receiver (preserve for slow entry into asm interpreter)

  // rsi: senderSP must be preserved for slow path, set SP to it on fast path

  address entry_point = __ pc();
  Label xreturn_path;

  // do fastpath for resolved accessor methods
  if (UseFastAccessorMethods) {
    Label slow_path;
    // If we need a safepoint check, generate full interpreter entry.
    ExternalAddress state(SafepointSynchronize::address_of_state());
    __ cmp32(ExternalAddress(SafepointSynchronize::address_of_state()),
             SafepointSynchronize::_not_synchronized);

    __ jcc(Assembler::notEqual, slow_path);
    // ASM/C++ Interpreter
    // Code: _aload_0, _(i|a)getfield, _(i|a)return or any rewrites thereof; parameter size = 1
    // Note: We can only use this code if the getfield has been resolved
    //       and if we don't have a null-pointer exception => check for
    //       these conditions first and use slow path if necessary.
    // rbx: method
    // rcx: receiver
    __ movptr(rax, Address(rsp, wordSize));

    // check if local 0 != NULL and read field
    __ testptr(rax, rax);
    __ jcc(Assembler::zero, slow_path);

    __ movptr(rdi, Address(rbx, methodOopDesc::constants_offset()));
    // read first instruction word and extract bytecode @ 1 and index @ 2
    __ movptr(rdx, Address(rbx, methodOopDesc::const_offset()));
    __ movl(rdx, Address(rdx, constMethodOopDesc::codes_offset()));
    // Shift codes right to get the index on the right.
    // The bytecode fetched looks like <index><0xb4><0x2a>
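    // (0x2a is aload_0 and 0xb4 is getfield, so shifting right by two bytes
    // leaves just the 2-byte constant pool cache index in rdx)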
    __ shrl(rdx, 2*BitsPerByte);
    __ shll(rdx, exact_log2(in_words(ConstantPoolCacheEntry::size())));
    __ movptr(rdi, Address(rdi, constantPoolOopDesc::cache_offset_in_bytes()));

    // rax: local 0
    // rbx: method
    // rcx: receiver - do not destroy since it is needed for slow path!
    // rcx: scratch
    // rdx: constant pool cache index
    // rdi: constant pool cache
    // rsi: sender sp

    // check if getfield has been resolved and read constant pool cache entry
    // check the validity of the cache entry by testing whether _indices field
    // contains Bytecode::_getfield in b1 byte.
    assert(in_words(ConstantPoolCacheEntry::size()) == 4, "adjust shift below");
    __ movl(rcx,
            Address(rdi,
                    rdx,
                    Address::times_ptr, constantPoolCacheOopDesc::base_offset() + ConstantPoolCacheEntry::indices_offset()));
    __ shrl(rcx, 2*BitsPerByte);
    __ andl(rcx, 0xFF);
    __ cmpl(rcx, Bytecodes::_getfield);
    __ jcc(Assembler::notEqual, slow_path);

    // Note: constant pool entry is not valid before bytecode is resolved
    __ movptr(rcx,
              Address(rdi,
                      rdx,
                      Address::times_ptr, constantPoolCacheOopDesc::base_offset() + ConstantPoolCacheEntry::f2_offset()));
    __ movl(rdx,
            Address(rdi,
                    rdx,
                    Address::times_ptr, constantPoolCacheOopDesc::base_offset() + ConstantPoolCacheEntry::flags_offset()));

    Label notByte, notShort, notChar;
    const Address field_address(rax, rcx, Address::times_1);

    // Need to differentiate between igetfield, agetfield, bgetfield etc.
    // because they are different sizes.
    // Use the type from the constant pool cache
    __ shrl(rdx, ConstantPoolCacheEntry::tosBits);
    // Make sure we don't need to mask rdx for tosBits after the above shift
    ConstantPoolCacheEntry::verify_tosBits();
    __ cmpl(rdx, btos);
    __ jcc(Assembler::notEqual, notByte);
    __ load_signed_byte(rax, field_address);
    __ jmp(xreturn_path);

    __ bind(notByte);
    __ cmpl(rdx, stos);
    __ jcc(Assembler::notEqual, notShort);
    __ load_signed_short(rax, field_address);
    __ jmp(xreturn_path);

    __ bind(notShort);
    __ cmpl(rdx, ctos);
    __ jcc(Assembler::notEqual, notChar);
    __ load_unsigned_short(rax, field_address);
    __ jmp(xreturn_path);

    __ bind(notChar);
#ifdef ASSERT
    Label okay;
    __ cmpl(rdx, atos);
    __ jcc(Assembler::equal, okay);
    __ cmpl(rdx, itos);
    __ jcc(Assembler::equal, okay);
    __ stop("what type is this?");
    __ bind(okay);
#endif // ASSERT
    // All the rest are a 32-bit word size.
    // This is OK for now, since fast accessors should be going away.
    __ movptr(rax, field_address);

    __ bind(xreturn_path);

    // _ireturn/_areturn
    __ pop(rdi);       // get return address
    __ mov(rsp, rsi);  // set sp to sender sp
    __ jmp(rdi);

    // generate a vanilla interpreter entry as the slow path
    __ bind(slow_path);

    (void) generate_normal_entry(false);
    return entry_point;
  }
  return NULL;

}

//
// Interpreter stub for calling a native method. (asm interpreter)
// This sets up a somewhat different-looking stack for calling the native method
// than the typical interpreter frame setup.
//

address InterpreterGenerator::generate_native_entry(bool synchronized) {
  // determine code generation flags
  bool inc_counter = UseCompiler || CountCompiledCalls;

  // rbx: methodOop
  // rsi: sender sp
  // rsi: previous interpreter state (C++ interpreter) must preserve
  address entry_point = __ pc();


  const Address size_of_parameters(rbx, methodOopDesc::size_of_parameters_offset());
  const Address invocation_counter(rbx, methodOopDesc::invocation_counter_offset() + InvocationCounter::counter_offset());
  const Address access_flags      (rbx, methodOopDesc::access_flags_offset());

  // get parameter size (always needed)
  __ load_unsigned_short(rcx, size_of_parameters);

  // native calls don't need the stack size check since they have no expression
  // stack, the arguments are already on the stack, and we only add a handful
  // of words to the stack

  // rbx: methodOop
  // rcx: size of parameters
  // rsi: sender sp

  __ pop(rax);  // get return address
  // for natives the size of locals is zero

  // compute beginning of parameters (rdi)
  __ lea(rdi, Address(rsp, rcx, Interpreter::stackElementScale(), -wordSize));


  // add 2 zero-initialized slots for native calls
  // NULL result handler
  __ push((int32_t)NULL_WORD);
  // NULL oop temp (mirror or jni oop result)
  __ push((int32_t)NULL_WORD);

  if (inc_counter) __ movl(rcx, invocation_counter);  // (pre-)fetch invocation count
  // initialize fixed part of activation frame

  generate_fixed_frame(true);

  // make sure method is native & not abstract
#ifdef ASSERT
  __ movl(rax, access_flags);
  {
    Label L;
    __ testl(rax, JVM_ACC_NATIVE);
    __ jcc(Assembler::notZero, L);
    __ stop("tried to execute non-native method as native");
    __ bind(L);
  }
  { Label L;
    __ testl(rax, JVM_ACC_ABSTRACT);
    __ jcc(Assembler::zero, L);
    __ stop("tried to execute abstract method in interpreter");
    __ bind(L);
  }
#endif

  // Since at this point in the method invocation the exception handler
  // would try to exit the monitor of a synchronized method which has not
  // been entered yet, we set the thread local variable
  // _do_not_unlock_if_synchronized to true. The remove_activation will
  // check this flag.

  __ get_thread(rax);
  const Address do_not_unlock_if_synchronized(rax,
        in_bytes(JavaThread::do_not_unlock_if_synchronized_offset()));
  __ movbool(do_not_unlock_if_synchronized, true);

  // increment invocation count & check for overflow
  Label invocation_counter_overflow;
  if (inc_counter) {
    generate_counter_incr(&invocation_counter_overflow, NULL, NULL);
  }

  Label continue_after_compile;
  __ bind(continue_after_compile);

  bang_stack_shadow_pages(true);

  // reset the _do_not_unlock_if_synchronized flag
  __ get_thread(rax);
  __ movbool(do_not_unlock_if_synchronized, false);

  // check for synchronized methods
  // Must happen AFTER invocation_counter check and stack overflow check,
  // so the method is not locked if the counter overflows.
  //
  if (synchronized) {
    lock_method();
  } else {
    // no synchronization necessary
#ifdef ASSERT
    { Label L;
      __ movl(rax, access_flags);
      __ testl(rax, JVM_ACC_SYNCHRONIZED);
      __ jcc(Assembler::zero, L);
      __ stop("method needs synchronization");
      __ bind(L);
    }
#endif
  }

  // start execution
#ifdef ASSERT
  { Label L;
    const Address monitor_block_top(rbp,
                                    frame::interpreter_frame_monitor_block_top_offset * wordSize);
    __ movptr(rax, monitor_block_top);
    __ cmpptr(rax, rsp);
    __ jcc(Assembler::equal, L);
    __ stop("broken stack frame setup in interpreter");
    __ bind(L);
  }
#endif

  // jvmti/dtrace support
  __ notify_method_entry();

  // work registers
  const Register method = rbx;
  const Register thread = rdi;
  const Register t      = rcx;

  // allocate space for parameters
  __ get_method(method);
  __ verify_oop(method);
  __ load_unsigned_short(t, Address(method, methodOopDesc::size_of_parameters_offset()));
  __ shlptr(t, Interpreter::logStackElementSize);
  __ addptr(t, 2*wordSize);                 // allocate two more slots for JNIEnv and possible mirror
  __ subptr(rsp, t);
  __ andptr(rsp, -(StackAlignmentInBytes)); // gcc needs 16 byte aligned stacks to do XMM intrinsics

  // get signature handler
  { Label L;
    __ movptr(t, Address(method, methodOopDesc::signature_handler_offset()));
    __ testptr(t, t);
    __ jcc(Assembler::notZero, L);
    __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::prepare_native_call), method);
    __ get_method(method);
    __ movptr(t, Address(method, methodOopDesc::signature_handler_offset()));
    __ bind(L);
  }

  // call signature handler
  assert(InterpreterRuntime::SignatureHandlerGenerator::from() == rdi, "adjust this code");
  assert(InterpreterRuntime::SignatureHandlerGenerator::to  () == rsp, "adjust this code");
  assert(InterpreterRuntime::SignatureHandlerGenerator::temp() == t  , "adjust this code");
  // The generated handlers do not touch RBX (the method oop).
  // However, large signatures cannot be cached and are generated
  // each time here. The slow-path generator can blow RBX
  // sometime, so we must reload it after the call.
  __ call(t);
  __ get_method(method);  // slow path call blows RBX on DevStudio 5.0

  // result handler is in rax
  // set result handler
  __ movptr(Address(rbp, frame::interpreter_frame_result_handler_offset*wordSize), rax);

  // pass mirror handle if static call
  { Label L;
    const int mirror_offset = klassOopDesc::klass_part_offset_in_bytes() + Klass::java_mirror_offset_in_bytes();
    __ movl(t, Address(method, methodOopDesc::access_flags_offset()));
    __ testl(t, JVM_ACC_STATIC);
    __ jcc(Assembler::zero, L);
    // get mirror
    __ movptr(t, Address(method, methodOopDesc::constants_offset()));
    __ movptr(t, Address(t, constantPoolOopDesc::pool_holder_offset_in_bytes()));
    __ movptr(t, Address(t, mirror_offset));
    // copy mirror into activation frame
    __ movptr(Address(rbp, frame::interpreter_frame_oop_temp_offset * wordSize), t);
    // pass handle to mirror
    __ lea(t, Address(rbp, frame::interpreter_frame_oop_temp_offset * wordSize));
    __ movptr(Address(rsp, wordSize), t);
    __ bind(L);
  }

  // get native function entry point
  { Label L;
    __ movptr(rax, Address(method, methodOopDesc::native_function_offset()));
    ExternalAddress unsatisfied(SharedRuntime::native_method_throw_unsatisfied_link_error_entry());
    __ cmpptr(rax, unsatisfied.addr());
    __ jcc(Assembler::notEqual, L);
    __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::prepare_native_call), method);
    __ get_method(method);
    __ verify_oop(method);
    __ movptr(rax, Address(method, methodOopDesc::native_function_offset()));
    __ bind(L);
  }

  // pass JNIEnv
  __ get_thread(thread);
  __ lea(t, Address(thread, JavaThread::jni_environment_offset()));
  __ movptr(Address(rsp, 0), t);

  // set_last_Java_frame_before_call
  // It is enough that the pc()
  // points into the right code segment. It does not have to be the correct return pc.
  __ set_last_Java_frame(thread, noreg, rbp, __ pc());

  // change thread state
#ifdef ASSERT
  { Label L;
    __ movl(t, Address(thread, JavaThread::thread_state_offset()));
    __ cmpl(t, _thread_in_Java);
    __ jcc(Assembler::equal, L);
    __ stop("Wrong thread state in native stub");
    __ bind(L);
  }
#endif

  // Change state to native
  __ movl(Address(thread, JavaThread::thread_state_offset()), _thread_in_native);
  __ call(rax);

  // result potentially in rdx:rax or ST0

  // Either restore the MXCSR register after returning from the JNI call
  // or verify that it wasn't changed.
  if (VM_Version::supports_sse()) {
    if (RestoreMXCSROnJNICalls) {
      __ ldmxcsr(ExternalAddress(StubRoutines::addr_mxcsr_std()));
    }
    else if (CheckJNICalls) {
      __ call(RuntimeAddress(StubRoutines::x86::verify_mxcsr_entry()));
    }
  }

  // Either restore the x87 floating point control word after returning
  // from the JNI call or verify that it wasn't changed.
  if (CheckJNICalls) {
    __ call(RuntimeAddress(StubRoutines::x86::verify_fpu_cntrl_wrd_entry()));
  }

  // save potential result in ST(0) & rdx:rax
  // (if result handler is the T_FLOAT or T_DOUBLE handler, result must be in ST0 -
  // the check is necessary to avoid potential Intel FPU overflow problems by saving/restoring 'empty' FPU registers)
  // It is safe to do this push because state is _thread_in_native and return address will be found
  // via _last_native_pc and not via _last_java_sp

  // NOTE: the order of these push(es) is known to frame::interpreter_frame_result.
  // If the order changes or anything else is added to the stack the code in
  // interpreter_frame_result will have to be changed.
  { Label L;
    Label push_double;
    ExternalAddress float_handler(AbstractInterpreter::result_handler(T_FLOAT));
    ExternalAddress double_handler(AbstractInterpreter::result_handler(T_DOUBLE));
    __ cmpptr(Address(rbp, (frame::interpreter_frame_oop_temp_offset + 1)*wordSize),
              float_handler.addr());
    __ jcc(Assembler::equal, push_double);
    __ cmpptr(Address(rbp, (frame::interpreter_frame_oop_temp_offset + 1)*wordSize),
              double_handler.addr());
    __ jcc(Assembler::notEqual, L);
    __ bind(push_double);
    __ push(dtos);
    __ bind(L);
  }
  __ push(ltos);
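  // push(ltos) saves rdx:rax, which covers results of all integer widths;
  // a float/double result (if any) was additionally pushed as dtos above.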

  // change thread state
  __ get_thread(thread);
  __ movl(Address(thread, JavaThread::thread_state_offset()), _thread_in_native_trans);
  if (os::is_MP()) {
    if (UseMembar) {
      // Force this write out before the read below
      __ membar(Assembler::Membar_mask_bits(
           Assembler::LoadLoad | Assembler::LoadStore |
           Assembler::StoreLoad | Assembler::StoreStore));
    } else {
      // Write serialization page so VM thread can do a pseudo remote membar.
      // We use the current thread pointer to calculate a thread specific
      // offset to write to within the page. This minimizes bus traffic
      // due to cache line collision.
      __ serialize_memory(thread, rcx);
    }
  }

  if (AlwaysRestoreFPU) {
    // Make sure the control word is correct.
    __ fldcw(ExternalAddress(StubRoutines::addr_fpu_cntrl_wrd_std()));
  }

  // check for safepoint operation in progress and/or pending suspend requests
  { Label Continue;

    __ cmp32(ExternalAddress(SafepointSynchronize::address_of_state()),
             SafepointSynchronize::_not_synchronized);

    Label L;
    __ jcc(Assembler::notEqual, L);
    __ cmpl(Address(thread, JavaThread::suspend_flags_offset()), 0);
    __ jcc(Assembler::equal, Continue);
    __ bind(L);

    // Don't use call_VM as it will see a possible pending exception and forward it
    // and never return here preventing us from clearing _last_native_pc down below.
    // Also can't use call_VM_leaf either as it will check to see if rsi & rdi are
    // preserved and correspond to the bcp/locals pointers. So we do a runtime call
    // by hand.
    //
    __ push(thread);
    __ call(RuntimeAddress(CAST_FROM_FN_PTR(address,
                                            JavaThread::check_special_condition_for_native_trans)));
    __ increment(rsp, wordSize);
    __ get_thread(thread);

    __ bind(Continue);
  }

  // change thread state
  __ movl(Address(thread, JavaThread::thread_state_offset()), _thread_in_Java);

  __ reset_last_Java_frame(thread, true, true);

  // reset handle block
  __ movptr(t, Address(thread, JavaThread::active_handles_offset()));
  __ movptr(Address(t, JNIHandleBlock::top_offset_in_bytes()), NULL_WORD);

  // If result was an oop then unbox and save it in the frame
  { Label L;
    Label no_oop, store_result;
    ExternalAddress handler(AbstractInterpreter::result_handler(T_OBJECT));
    __ cmpptr(Address(rbp, frame::interpreter_frame_result_handler_offset*wordSize),
              handler.addr());
    __ jcc(Assembler::notEqual, no_oop);
    __ cmpptr(Address(rsp, 0), (int32_t)NULL_WORD);
    __ pop(ltos);
    __ testptr(rax, rax);
    __ jcc(Assembler::zero, store_result);
    // unbox
    __ movptr(rax, Address(rax, 0));
    __ bind(store_result);
    __ movptr(Address(rbp, (frame::interpreter_frame_oop_temp_offset)*wordSize), rax);
    // keep stack depth as expected by pushing an oop which will eventually be discarded
    __ push(ltos);
    __ bind(no_oop);
  }

  {
    Label no_reguard;
    __ cmpl(Address(thread, JavaThread::stack_guard_state_offset()), JavaThread::stack_guard_yellow_disabled);
    __ jcc(Assembler::notEqual, no_reguard);

    __ pusha();
    __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::reguard_yellow_pages)));
    __ popa();

    __ bind(no_reguard);
  }

  // restore rsi to have legal interpreter frame,
  // i.e., bci == 0 <=> rsi == code_base()
  // Can't call_VM until bcp is within a reasonable range.
  __ get_method(method);  // method is junk from thread_in_native to now.
  __ verify_oop(method);
  __ movptr(rsi, Address(method, methodOopDesc::const_offset()));   // get constMethodOop
  __ lea(rsi, Address(rsi, constMethodOopDesc::codes_offset()));    // get codebase

  // handle exceptions (exception handling will handle unlocking!)
  { Label L;
    __ cmpptr(Address(thread, Thread::pending_exception_offset()), (int32_t)NULL_WORD);
    __ jcc(Assembler::zero, L);
    // Note: At some point we may want to unify this with the code used in call_VM_base();
    //       i.e., we should use the StubRoutines::forward_exception code. For now this
    //       doesn't work here because the rsp is not correctly set at this point.
    __ MacroAssembler::call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_pending_exception));
    __ should_not_reach_here();
    __ bind(L);
  }

  // do unlocking if necessary
  { Label L;
    __ movl(t, Address(method, methodOopDesc::access_flags_offset()));
    __ testl(t, JVM_ACC_SYNCHRONIZED);
    __ jcc(Assembler::zero, L);
    // the code below should be shared with the interpreter macro assembler implementation
    { Label unlock;
      // BasicObjectLock will be first in list, since this is a synchronized method. However, we need
      // to check that the object has not been unlocked by an explicit monitorexit bytecode.
      const Address monitor(rbp, frame::interpreter_frame_initial_sp_offset * wordSize - (int)sizeof(BasicObjectLock));
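      // (the first monitor sits immediately below the frame's initial
      // expression stack bottom: initial_sp minus one BasicObjectLock)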

      __ lea(rdx, monitor);  // address of first monitor

      __ movptr(t, Address(rdx, BasicObjectLock::obj_offset_in_bytes()));
      __ testptr(t, t);
      __ jcc(Assembler::notZero, unlock);

      // Entry already unlocked, need to throw exception
      __ MacroAssembler::call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_illegal_monitor_state_exception));
      __ should_not_reach_here();

      __ bind(unlock);
      __ unlock_object(rdx);
    }
    __ bind(L);
  }

  // jvmti/dtrace support
  // Note: This must happen _after_ handling/throwing any exceptions since
  //       the exception handler code notifies the runtime of method exits
  //       too. If this happens before, method entry/exit notifications are
  //       not properly paired (was bug - gri 11/22/99).
  __ notify_method_exit(vtos, InterpreterMacroAssembler::NotifyJVMTI);

  // restore potential result in rdx:rax, call result handler to restore potential result in ST0 & handle result
  __ pop(ltos);
  __ movptr(t, Address(rbp, frame::interpreter_frame_result_handler_offset*wordSize));
  __ call(t);

  // remove activation
  __ movptr(t, Address(rbp, frame::interpreter_frame_sender_sp_offset * wordSize));  // get sender sp
  __ leave();      // remove frame anchor
  __ pop(rdi);     // get return address
  __ mov(rsp, t);  // set sp to sender sp
  __ jmp(rdi);

  if (inc_counter) {
    // Handle overflow of counter and compile method
    __ bind(invocation_counter_overflow);
    generate_counter_overflow(&continue_after_compile);
  }

  return entry_point;
}

//
// Generic interpreted method entry to (asm) interpreter
//
address InterpreterGenerator::generate_normal_entry(bool synchronized) {
  // determine code generation flags
  bool inc_counter = UseCompiler || CountCompiledCalls;

  // rbx: methodOop
  // rsi: sender sp
  address entry_point = __ pc();


  const Address size_of_parameters(rbx, methodOopDesc::size_of_parameters_offset());
  const Address size_of_locals    (rbx, methodOopDesc::size_of_locals_offset());
  const Address invocation_counter(rbx, methodOopDesc::invocation_counter_offset() + InvocationCounter::counter_offset());
  const Address access_flags      (rbx, methodOopDesc::access_flags_offset());

  // get parameter size (always needed)
  __ load_unsigned_short(rcx, size_of_parameters);

  // rbx: methodOop
  // rcx: size of parameters

  // rsi: sender_sp (could differ from sp+wordSize if we were called via c2i)

  __ load_unsigned_short(rdx, size_of_locals);  // get size of locals in words
  __ subl(rdx, rcx);                            // rdx = no. of additional locals

  // see if we've got enough room on the stack for locals plus overhead.
  generate_stack_overflow_check();

  // get return address
  __ pop(rax);

  // compute beginning of parameters (rdi)
  __ lea(rdi, Address(rsp, rcx, Interpreter::stackElementScale(), -wordSize));

  // rdx - # of additional locals
  // allocate space for locals
  // explicitly initialize locals
  {
    Label exit, loop;
    __ testl(rdx, rdx);
    __ jcc(Assembler::lessEqual, exit);  // do nothing if rdx <= 0
    __ bind(loop);
    __ push((int32_t)NULL_WORD);         // initialize local variables
    __ decrement(rdx);                   // until everything initialized
    __ jcc(Assembler::greater, loop);
    __ bind(exit);
  }

  if (inc_counter) __ movl(rcx, invocation_counter);  // (pre-)fetch invocation count
  // initialize fixed part of activation frame
  generate_fixed_frame(false);

  // make sure method is not native & not abstract
#ifdef ASSERT
  __ movl(rax, access_flags);
  {
    Label L;
    __ testl(rax, JVM_ACC_NATIVE);
    __ jcc(Assembler::zero, L);
    __ stop("tried to execute native method as non-native");
    __ bind(L);
  }
  { Label L;
    __ testl(rax, JVM_ACC_ABSTRACT);
    __ jcc(Assembler::zero, L);
    __ stop("tried to execute abstract method in interpreter");
    __ bind(L);
  }
#endif

  // Since at this point in the method invocation the exception handler
  // would try to exit the monitor of a synchronized method which has not
  // been entered yet, we set the thread local variable
  // _do_not_unlock_if_synchronized to true. The remove_activation will
  // check this flag.

  __ get_thread(rax);
  const Address do_not_unlock_if_synchronized(rax,
        in_bytes(JavaThread::do_not_unlock_if_synchronized_offset()));
  __ movbool(do_not_unlock_if_synchronized, true);

  // increment invocation count & check for overflow
  Label invocation_counter_overflow;
  Label profile_method;
  Label profile_method_continue;
  if (inc_counter) {
    generate_counter_incr(&invocation_counter_overflow, &profile_method, &profile_method_continue);
    if (ProfileInterpreter) {
      __ bind(profile_method_continue);
    }
  }
  Label continue_after_compile;
  __ bind(continue_after_compile);

  bang_stack_shadow_pages(false);

  // reset the _do_not_unlock_if_synchronized flag
  __ get_thread(rax);
  __ movbool(do_not_unlock_if_synchronized, false);

  // check for synchronized methods
  // Must happen AFTER invocation_counter check and stack overflow check,
  // so the method is not locked if the counter overflows.
  //
  if (synchronized) {
    // Allocate monitor and lock method
    lock_method();
  } else {
    // no synchronization necessary
#ifdef ASSERT
    { Label L;
      __ movl(rax, access_flags);
      __ testl(rax, JVM_ACC_SYNCHRONIZED);
      __ jcc(Assembler::zero, L);
      __ stop("method needs synchronization");
      __ bind(L);
    }
#endif
  }

  // start execution
#ifdef ASSERT
  { Label L;
    const Address monitor_block_top(rbp,
                                    frame::interpreter_frame_monitor_block_top_offset * wordSize);
    __ movptr(rax, monitor_block_top);
    __ cmpptr(rax, rsp);
    __ jcc(Assembler::equal, L);
    __ stop("broken stack frame setup in interpreter");
    __ bind(L);
  }
#endif

  // jvmti support
  __ notify_method_entry();

  __ dispatch_next(vtos);

  // invocation counter overflow
  if (inc_counter) {
    if (ProfileInterpreter) {
      // We have decided to profile this method in the interpreter
      __ bind(profile_method);
      __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::profile_method));
      __ set_method_data_pointer_for_bcp();
      __ get_method(rbx);
      __ jmp(profile_method_continue);
    }
    // Handle overflow of counter and compile method
    __ bind(invocation_counter_overflow);
    generate_counter_overflow(&continue_after_compile);
  }

  return entry_point;
}

1373 1373 //------------------------------------------------------------------------------------------------------------------------
1374 1374 // Entry points
1375 1375 //
1376 1376 // Here we generate the various kinds of entries into the interpreter.
1377 1377 // The two main entry types are generic bytecode methods and native call
1378 1378 // methods. These both come in synchronized and non-synchronized versions,
1379 1379 // but the frame layout they create is very similar. The other method entry
1380 1380 // types are special-purpose entries that combine entry and
1381 1381 // interpretation all in one. These are for trivial methods like
1382 1382 // accessor, empty, or special math methods.
1383 1383 //
1384 1384 // When control flow reaches any of the entry types for the interpreter,
1385 1385 // the following holds:
1386 1386 //
1387 1387 // Arguments:
1388 1388 //
1389 1389 // rbx,: methodOop
1390 1390 // rcx: receiver
1391 1391 //
1392 1392 //
1393 1393 // Stack layout immediately at entry
1394 1394 //
1395 1395 // [ return address ] <--- rsp
1396 1396 // [ parameter n ]
1397 1397 // ...
1398 1398 // [ parameter 1 ]
1399 1399 // [ expression stack ] (caller's java expression stack)
1400 1400
1401 1401 // Assuming that we don't go to one of the trivial specialized
1402 1402 // entries, the stack will look like below when we are ready to execute
1403 1403 // the first bytecode (or call the native routine). The register usage
1404 1404 // will be as the template-based interpreter expects (see interpreter_x86.hpp).
1405 1405 //
1406 1406 // Local variables follow the incoming parameters immediately (i.e.,
1407 1407 // the return address is moved to the end of the locals).
1408 1408 //
1409 1409 // [ monitor entry ] <--- rsp
1410 1410 // ...
1411 1411 // [ monitor entry ]
1412 1412 // [ expr. stack bottom ]
1413 1413 // [ saved rsi ]
1414 1414 // [ current rdi ]
1415 1415 // [ methodOop ]
1416 1416 // [ saved rbp, ] <--- rbp,
1417 1417 // [ return address ]
1418 1418 // [ local variable m ]
1419 1419 // ...
1420 1420 // [ local variable 1 ]
1421 1421 // [ parameter n ]
1422 1422 // ...
1423 1423 // [ parameter 1 ] <--- rdi
1424 1424
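
To make the diagram concrete: locals overlap the incoming parameters, with
local slot 0 aliasing parameter 1, so the locals pointer (rdi) can be derived
from the caller's sp. A hedged sketch mirroring the arithmetic used in
layout_activation further down (names are illustrative):

    // sender_sp points just above the parameters; max_locals is in words
    intptr_t* locals = sender_sp + max_locals - 1;   // == rdi in the diagram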
1425 1425 address AbstractInterpreterGenerator::generate_method_entry(AbstractInterpreter::MethodKind kind) {
1426 1426 // determine code generation flags
1427 1427 bool synchronized = false;
1428 1428 address entry_point = NULL;
1429 1429
1430 1430 switch (kind) {
1431 1431 case Interpreter::zerolocals : break;
1432 1432 case Interpreter::zerolocals_synchronized: synchronized = true; break;
1433 1433 case Interpreter::native : entry_point = ((InterpreterGenerator*)this)->generate_native_entry(false); break;
1434 1434 case Interpreter::native_synchronized : entry_point = ((InterpreterGenerator*)this)->generate_native_entry(true); break;
1435 1435 case Interpreter::empty : entry_point = ((InterpreterGenerator*)this)->generate_empty_entry(); break;
1436 1436 case Interpreter::accessor : entry_point = ((InterpreterGenerator*)this)->generate_accessor_entry(); break;
1437 1437 case Interpreter::abstract : entry_point = ((InterpreterGenerator*)this)->generate_abstract_entry(); break;
1438 1438 case Interpreter::method_handle : entry_point = ((InterpreterGenerator*)this)->generate_method_handle_entry(); break;
1439 1439
1440 1440 case Interpreter::java_lang_math_sin : // fall thru
1441 1441 case Interpreter::java_lang_math_cos : // fall thru
1442 1442 case Interpreter::java_lang_math_tan : // fall thru
1443 1443 case Interpreter::java_lang_math_abs : // fall thru
1444 1444 case Interpreter::java_lang_math_log : // fall thru
1445 1445 case Interpreter::java_lang_math_log10 : // fall thru
1446 1446 case Interpreter::java_lang_math_sqrt : entry_point = ((InterpreterGenerator*)this)->generate_math_entry(kind); break;
1447 1447 default : ShouldNotReachHere(); break;
1448 1448 }
1449 1449
1450 1450 if (entry_point) return entry_point;
1451 1451
1452 1452 return ((InterpreterGenerator*)this)->generate_normal_entry(synchronized);
1453 1453
1454 1454 }
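
As a usage sketch, callers later fetch the generated entries by method kind;
entry_for_kind is assumed here to be the usual accessor:

    // hypothetical lookup of the intrinsic sqrt entry generated above
    address math_entry =
        AbstractInterpreter::entry_for_kind(Interpreter::java_lang_math_sqrt);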
1455 1455
1456 1456 // These methods should never be compiled: once a compiled version exists,
1457 1457 // the interpreter will prefer it to the intrinsic version.
1458 1458 bool AbstractInterpreter::can_be_compiled(methodHandle m) {
1459 1459 switch (method_kind(m)) {
1460 1460 case Interpreter::java_lang_math_sin : // fall thru
1461 1461 case Interpreter::java_lang_math_cos : // fall thru
1462 1462 case Interpreter::java_lang_math_tan : // fall thru
1463 1463 case Interpreter::java_lang_math_abs : // fall thru
1464 1464 case Interpreter::java_lang_math_log : // fall thru
1465 1465 case Interpreter::java_lang_math_log10 : // fall thru
1466 1466 case Interpreter::java_lang_math_sqrt :
1467 1467 return false;
1468 1468 default:
1469 1469 return true;
1470 1470 }
1471 1471 }
1472 1472
1473 1473 // How much stack a method activation needs in words.
1474 1474 int AbstractInterpreter::size_top_interpreter_activation(methodOop method) {
1475 1475
1476 1476 const int stub_code = 4; // see generate_call_stub
1477 1477 // Save space for one monitor to get into the interpreted method in case
1478 1478 // the method is synchronized
1479 1479 int monitor_size = method->is_synchronized() ?
1480 1480 1*frame::interpreter_frame_monitor_size() : 0;
1481 1481
1482 1482 // total overhead size: entry_size + (saved rbp, thru expr stack bottom).
1483 1483 // be sure to change this if you add/subtract anything to/from the overhead area
1484 1484 const int overhead_size = -frame::interpreter_frame_initial_sp_offset + monitor_size;
1485 1485
1486 1486 const int extra_stack = methodOopDesc::extra_stack_entries();
1487 1487 const int method_stack = (method->max_locals() + method->max_stack() + extra_stack) *
1488 1488 Interpreter::stackElementWords;
1489 1489 return overhead_size + method_stack + stub_code;
1490 1490 }
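
A worked instance of this sizing with assumed numbers (the real values are
platform frame offsets; everything below is purely illustrative):

    // stub_code = 4; monitor_size = 2 (one BasicObjectLock, synchronized);
    // -interpreter_frame_initial_sp_offset assumed 10, so overhead_size = 12;
    // max_locals = 3, max_stack = 5, extra_stack = 0, stackElementWords = 1:
    //   method_stack = (3 + 5 + 0) * 1 = 8 words
    //   total        = 12 + 8 + 4     = 24 words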
1491 1491
1492 1492 // asm based interpreter deoptimization helpers
1493 1493
1494 1494 int AbstractInterpreter::layout_activation(methodOop method,
1495 1495 int tempcount,
1496 1496 int popframe_extra_args,
1497 1497 int moncount,
1498 1498 int callee_param_count,
1499 1499 int callee_locals,
1500 1500 frame* caller,
1501 1501 frame* interpreter_frame,
1502 1502 bool is_top_frame) {
1503 1503 // Note: This calculation must exactly parallel the frame setup
1504 1504 // in AbstractInterpreterGenerator::generate_method_entry.
1505 1505 // If interpreter_frame!=NULL, set up the method, locals, and monitors.
1506 1506 // The frame interpreter_frame, if not NULL, is guaranteed to be the right size,
1507 1507 // as determined by a previous call to this method.
1508 1508 // It is also guaranteed to be walkable even though it is in a skeletal state.
1509 1509 // NOTE: the returned size is in words, not bytes.
1510 1510
1511 1511 // fixed size of an interpreter frame:
1512 1512 int max_locals = method->max_locals() * Interpreter::stackElementWords;
1513 1513 int extra_locals = (method->max_locals() - method->size_of_parameters()) *
1514 1514 Interpreter::stackElementWords;
1515 1515
1516 1516 int overhead = frame::sender_sp_offset - frame::interpreter_frame_initial_sp_offset;
1517 1517
1518 1518   // Our locals were accounted for by the caller (or last_frame_adjust on the transition).
1519 1519   // Since the callee parameters already account for the callee's params, we only need to account for
1520 1520   // the extra locals.
1521 1521
1522 1522
1523 1523 int size = overhead +
1524 1524 ((callee_locals - callee_param_count)*Interpreter::stackElementWords) +
1525 1525 (moncount*frame::interpreter_frame_monitor_size()) +
1526 1526 tempcount*Interpreter::stackElementWords + popframe_extra_args;
1527 1527
1528 1528 if (interpreter_frame != NULL) {
1529 1529 #ifdef ASSERT
1530 - if (!EnableMethodHandles)
1530 + if (!EnableInvokeDynamic)
1531 1531 // @@@ FIXME: Should we correct interpreter_frame_sender_sp in the calling sequences?
1532 1532 // Probably, since deoptimization doesn't work yet.
1533 1533 assert(caller->unextended_sp() == interpreter_frame->interpreter_frame_sender_sp(), "Frame not properly walkable");
1534 1534 assert(caller->sp() == interpreter_frame->sender_sp(), "Frame not properly walkable(2)");
1535 1535 #endif
1536 1536
1537 1537 interpreter_frame->interpreter_frame_set_method(method);
1538 1538     // NOTE the difference between sender_sp and interpreter_frame_sender_sp:
1539 1539 // interpreter_frame_sender_sp is the original sp of the caller (the unextended_sp)
1540 1540 // and sender_sp is fp+8
1541 1541 intptr_t* locals = interpreter_frame->sender_sp() + max_locals - 1;
1542 1542
1543 1543 interpreter_frame->interpreter_frame_set_locals(locals);
1544 1544 BasicObjectLock* montop = interpreter_frame->interpreter_frame_monitor_begin();
1545 1545 BasicObjectLock* monbot = montop - moncount;
1546 1546 interpreter_frame->interpreter_frame_set_monitor_end(monbot);
1547 1547
1548 1548 // Set last_sp
1549 1549 intptr_t* rsp = (intptr_t*) monbot -
1550 1550 tempcount*Interpreter::stackElementWords -
1551 1551 popframe_extra_args;
1552 1552 interpreter_frame->interpreter_frame_set_last_sp(rsp);
1553 1553
1554 1554 // All frames but the initial (oldest) interpreter frame we fill in have a
1555 1555 // value for sender_sp that allows walking the stack but isn't
1556 1556 // truly correct. Correct the value here.
1557 1557
1558 1558 if (extra_locals != 0 &&
1559 1559 interpreter_frame->sender_sp() == interpreter_frame->interpreter_frame_sender_sp() ) {
1560 1560 interpreter_frame->set_interpreter_frame_sender_sp(caller->sp() + extra_locals);
1561 1561 }
1562 1562 *interpreter_frame->interpreter_frame_cache_addr() =
1563 1563 method->constants()->cache();
1564 1564 }
1565 1565 return size;
1566 1566 }
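
A worked instance of the size formula above, again with assumed numbers
(monitor size and the frame offsets are platform constants):

    // overhead = sender_sp_offset - interpreter_frame_initial_sp_offset,
    //            assumed 10 words
    // (callee_locals - callee_param_count) = (5 - 3) * 1 = 2 words
    // moncount = 1 monitor * 2 words (assumed)           = 2 words
    // tempcount = 3 words, popframe_extra_args = 0
    // size = 10 + 2 + 2 + 3 + 0 = 17 words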
1567 1567
1568 1568
1569 1569 //------------------------------------------------------------------------------------------------------------------------
1570 1570 // Exceptions
1571 1571
1572 1572 void TemplateInterpreterGenerator::generate_throw_exception() {
1573 1573 // Entry point in previous activation (i.e., if the caller was interpreted)
1574 1574 Interpreter::_rethrow_exception_entry = __ pc();
1575 1575 const Register thread = rcx;
1576 1576
1577 1577 // Restore sp to interpreter_frame_last_sp even though we are going
1578 1578 // to empty the expression stack for the exception processing.
1579 1579 __ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), NULL_WORD);
1580 1580 // rax,: exception
1581 1581 // rdx: return address/pc that threw exception
1582 1582 __ restore_bcp(); // rsi points to call/send
1583 1583 __ restore_locals();
1584 1584
1585 1585 // Entry point for exceptions thrown within interpreter code
1586 1586 Interpreter::_throw_exception_entry = __ pc();
1587 1587 // expression stack is undefined here
1588 1588 // rax,: exception
1589 1589 // rsi: exception bcp
1590 1590 __ verify_oop(rax);
1591 1591
1592 1592 // expression stack must be empty before entering the VM in case of an exception
1593 1593 __ empty_expression_stack();
1594 1594 __ empty_FPU_stack();
1595 1595 // find exception handler address and preserve exception oop
1596 1596 __ call_VM(rdx, CAST_FROM_FN_PTR(address, InterpreterRuntime::exception_handler_for_exception), rax);
1597 1597 // rax,: exception handler entry point
1598 1598 // rdx: preserved exception oop
1599 1599 // rsi: bcp for exception handler
1600 1600 __ push_ptr(rdx); // push exception which is now the only value on the stack
1601 1601 __ jmp(rax); // jump to exception handler (may be _remove_activation_entry!)
1602 1602
1603 1603 // If the exception is not handled in the current frame the frame is removed and
1604 1604 // the exception is rethrown (i.e. exception continuation is _rethrow_exception).
1605 1605 //
1606 1606   // Note: At this point the bci is still the bci for the instruction which caused
1607 1607 // the exception and the expression stack is empty. Thus, for any VM calls
1608 1608 // at this point, GC will find a legal oop map (with empty expression stack).
1609 1609
1610 1610 // In current activation
1611 1611 // tos: exception
1612 1612 // rsi: exception bcp
1613 1613
1614 1614 //
1615 1615 // JVMTI PopFrame support
1616 1616 //
1617 1617
1618 1618 Interpreter::_remove_activation_preserving_args_entry = __ pc();
1619 1619 __ empty_expression_stack();
1620 1620 __ empty_FPU_stack();
1621 1621 // Set the popframe_processing bit in pending_popframe_condition indicating that we are
1622 1622 // currently handling popframe, so that call_VMs that may happen later do not trigger new
1623 1623 // popframe handling cycles.
1624 1624 __ get_thread(thread);
1625 1625 __ movl(rdx, Address(thread, JavaThread::popframe_condition_offset()));
1626 1626 __ orl(rdx, JavaThread::popframe_processing_bit);
1627 1627 __ movl(Address(thread, JavaThread::popframe_condition_offset()), rdx);
1628 1628
1629 1629 {
1630 1630 // Check to see whether we are returning to a deoptimized frame.
1631 1631 // (The PopFrame call ensures that the caller of the popped frame is
1632 1632 // either interpreted or compiled and deoptimizes it if compiled.)
1633 1633 // In this case, we can't call dispatch_next() after the frame is
1634 1634 // popped, but instead must save the incoming arguments and restore
1635 1635 // them after deoptimization has occurred.
1636 1636 //
1637 1637 // Note that we don't compare the return PC against the
1638 1638 // deoptimization blob's unpack entry because of the presence of
1639 1639 // adapter frames in C2.
1640 1640 Label caller_not_deoptimized;
1641 1641 __ movptr(rdx, Address(rbp, frame::return_addr_offset * wordSize));
1642 1642 __ super_call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::interpreter_contains), rdx);
1643 1643 __ testl(rax, rax);
1644 1644 __ jcc(Assembler::notZero, caller_not_deoptimized);
1645 1645
1646 1646 // Compute size of arguments for saving when returning to deoptimized caller
1647 1647 __ get_method(rax);
1648 1648 __ verify_oop(rax);
1649 1649 __ load_unsigned_short(rax, Address(rax, in_bytes(methodOopDesc::size_of_parameters_offset())));
1650 1650 __ shlptr(rax, Interpreter::logStackElementSize);
1651 1651 __ restore_locals();
1652 1652 __ subptr(rdi, rax);
1653 1653 __ addptr(rdi, wordSize);
1654 1654 // Save these arguments
1655 1655 __ get_thread(thread);
1656 1656 __ super_call_VM_leaf(CAST_FROM_FN_PTR(address, Deoptimization::popframe_preserve_args), thread, rax, rdi);
1657 1657
1658 1658 __ remove_activation(vtos, rdx,
1659 1659 /* throw_monitor_exception */ false,
1660 1660 /* install_monitor_exception */ false,
1661 1661 /* notify_jvmdi */ false);
1662 1662
1663 1663 // Inform deoptimization that it is responsible for restoring these arguments
1664 1664 __ get_thread(thread);
1665 1665 __ movl(Address(thread, JavaThread::popframe_condition_offset()), JavaThread::popframe_force_deopt_reexecution_bit);
1666 1666
1667 1667 // Continue in deoptimization handler
1668 1668 __ jmp(rdx);
1669 1669
1670 1670 __ bind(caller_not_deoptimized);
1671 1671 }
1672 1672
1673 1673 __ remove_activation(vtos, rdx,
1674 1674 /* throw_monitor_exception */ false,
1675 1675 /* install_monitor_exception */ false,
1676 1676 /* notify_jvmdi */ false);
1677 1677
1678 1678 // Finish with popframe handling
1679 1679 // A previous I2C followed by a deoptimization might have moved the
1680 1680 // outgoing arguments further up the stack. PopFrame expects the
1681 1681 // mutations to those outgoing arguments to be preserved and other
1682 1682 // constraints basically require this frame to look exactly as
1683 1683 // though it had previously invoked an interpreted activation with
1684 1684 // no space between the top of the expression stack (current
1685 1685 // last_sp) and the top of stack. Rather than force deopt to
1686 1686   // maintain this kind of invariant all the time, we call a small
1687 1687 // fixup routine to move the mutated arguments onto the top of our
1688 1688 // expression stack if necessary.
1689 1689 __ mov(rax, rsp);
1690 1690 __ movptr(rbx, Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize));
1691 1691 __ get_thread(thread);
1692 1692 // PC must point into interpreter here
1693 1693 __ set_last_Java_frame(thread, noreg, rbp, __ pc());
1694 1694 __ super_call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::popframe_move_outgoing_args), thread, rax, rbx);
1695 1695 __ get_thread(thread);
1696 1696 __ reset_last_Java_frame(thread, true, true);
1697 1697 // Restore the last_sp and null it out
1698 1698 __ movptr(rsp, Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize));
1699 1699 __ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), NULL_WORD);
1700 1700
1701 1701 __ restore_bcp();
1702 1702 __ restore_locals();
1703 1703 // The method data pointer was incremented already during
1704 1704 // call profiling. We have to restore the mdp for the current bcp.
1705 1705 if (ProfileInterpreter) {
1706 1706 __ set_method_data_pointer_for_bcp();
1707 1707 }
1708 1708
1709 1709 // Clear the popframe condition flag
1710 1710 __ get_thread(thread);
1711 1711 __ movl(Address(thread, JavaThread::popframe_condition_offset()), JavaThread::popframe_inactive);
1712 1712
1713 1713 __ dispatch_next(vtos);
1714 1714 // end of PopFrame support
1715 1715
1716 1716 Interpreter::_remove_activation_entry = __ pc();
1717 1717
1718 1718 // preserve exception over this code sequence
1719 1719 __ pop_ptr(rax);
1720 1720 __ get_thread(thread);
1721 1721 __ movptr(Address(thread, JavaThread::vm_result_offset()), rax);
1722 1722 // remove the activation (without doing throws on illegalMonitorExceptions)
1723 1723 __ remove_activation(vtos, rdx, false, true, false);
1724 1724 // restore exception
1725 1725 __ get_thread(thread);
1726 1726 __ movptr(rax, Address(thread, JavaThread::vm_result_offset()));
1727 1727 __ movptr(Address(thread, JavaThread::vm_result_offset()), NULL_WORD);
1728 1728 __ verify_oop(rax);
1729 1729
1730 1730   // In between activations - previous activation type not yet known
1731 1731 // compute continuation point - the continuation point expects
1732 1732 // the following registers set up:
1733 1733 //
1734 1734 // rax: exception
1735 1735 // rdx: return address/pc that threw exception
1736 1736 // rsp: expression stack of caller
1737 1737 // rbp: rbp, of caller
1738 1738 __ push(rax); // save exception
1739 1739 __ push(rdx); // save return address
1740 1740 __ super_call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::exception_handler_for_return_address), thread, rdx);
1741 1741 __ mov(rbx, rax); // save exception handler
1742 1742 __ pop(rdx); // restore return address
1743 1743 __ pop(rax); // restore exception
1744 1744 // Note that an "issuing PC" is actually the next PC after the call
1745 1745 __ jmp(rbx); // jump to exception handler of caller
1746 1746 }
1747 1747
1748 1748
1749 1749 //
1750 1750 // JVMTI ForceEarlyReturn support
1751 1751 //
1752 1752 address TemplateInterpreterGenerator::generate_earlyret_entry_for(TosState state) {
1753 1753 address entry = __ pc();
1754 1754 const Register thread = rcx;
1755 1755
1756 1756 __ restore_bcp();
1757 1757 __ restore_locals();
1758 1758 __ empty_expression_stack();
1759 1759 __ empty_FPU_stack();
1760 1760 __ load_earlyret_value(state);
1761 1761
1762 1762 __ get_thread(thread);
1763 1763 __ movptr(rcx, Address(thread, JavaThread::jvmti_thread_state_offset()));
1764 1764 const Address cond_addr(rcx, JvmtiThreadState::earlyret_state_offset());
1765 1765
1766 1766 // Clear the earlyret state
1767 1767 __ movl(cond_addr, JvmtiThreadState::earlyret_inactive);
1768 1768
1769 1769 __ remove_activation(state, rsi,
1770 1770 false, /* throw_monitor_exception */
1771 1771 false, /* install_monitor_exception */
1772 1772 true); /* notify_jvmdi */
1773 1773 __ jmp(rsi);
1774 1774 return entry;
1775 1775 } // end of ForceEarlyReturn support
1776 1776
1777 1777
1778 1778 //------------------------------------------------------------------------------------------------------------------------
1779 1779 // Helper for vtos entry point generation
1780 1780
1781 1781 void TemplateInterpreterGenerator::set_vtos_entry_points (Template* t, address& bep, address& cep, address& sep, address& aep, address& iep, address& lep, address& fep, address& dep, address& vep) {
1782 1782 assert(t->is_valid() && t->tos_in() == vtos, "illegal template");
1783 1783 Label L;
1784 1784 fep = __ pc(); __ push(ftos); __ jmp(L);
1785 1785 dep = __ pc(); __ push(dtos); __ jmp(L);
1786 1786 lep = __ pc(); __ push(ltos); __ jmp(L);
1787 1787 aep = __ pc(); __ push(atos); __ jmp(L);
1788 1788 bep = cep = sep = // fall through
1789 1789 iep = __ pc(); __ push(itos); // fall through
1790 1790 vep = __ pc(); __ bind(L); // fall through
1791 1791 generate_and_dispatch(t);
1792 1792 }
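
A note on the helper above: a vtos template carries no cached top-of-stack
value, so each non-void entry point first spills its tos register(s) to the
expression stack (push(ftos), push(dtos), ...) and then falls into the shared
label L. byte, char, and short share the int entry because they live in the
same tosca register.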
1793 1793
1794 1794 //------------------------------------------------------------------------------------------------------------------------
1795 1795 // Generation of individual instructions
1796 1796
1797 1797 // helpers for generate_and_dispatch
1798 1798
1799 1799
1800 1800
1801 1801 InterpreterGenerator::InterpreterGenerator(StubQueue* code)
1802 1802 : TemplateInterpreterGenerator(code) {
1803 1803 generate_all(); // down here so it can be "virtual"
1804 1804 }
1805 1805
1806 1806 //------------------------------------------------------------------------------------------------------------------------
1807 1807
1808 1808 // Non-product code
1809 1809 #ifndef PRODUCT
1810 1810 address TemplateInterpreterGenerator::generate_trace_code(TosState state) {
1811 1811 address entry = __ pc();
1812 1812
1813 1813 // prepare expression stack
1814 1814 __ pop(rcx); // pop return address so expression stack is 'pure'
1815 1815 __ push(state); // save tosca
1816 1816
1817 1817 // pass tosca registers as arguments & call tracer
1818 1818 __ call_VM(noreg, CAST_FROM_FN_PTR(address, SharedRuntime::trace_bytecode), rcx, rax, rdx);
1819 1819 __ mov(rcx, rax); // make sure return address is not destroyed by pop(state)
1820 1820 __ pop(state); // restore tosca
1821 1821
1822 1822 // return
1823 1823 __ jmp(rcx);
1824 1824
1825 1825 return entry;
1826 1826 }
1827 1827
1828 1828
1829 1829 void TemplateInterpreterGenerator::count_bytecode() {
1830 1830 __ incrementl(ExternalAddress((address) &BytecodeCounter::_counter_value));
1831 1831 }
1832 1832
1833 1833
1834 1834 void TemplateInterpreterGenerator::histogram_bytecode(Template* t) {
1835 1835 __ incrementl(ExternalAddress((address) &BytecodeHistogram::_counters[t->bytecode()]));
1836 1836 }
1837 1837
1838 1838
1839 1839 void TemplateInterpreterGenerator::histogram_bytecode_pair(Template* t) {
1840 1840 __ mov32(rbx, ExternalAddress((address) &BytecodePairHistogram::_index));
1841 1841 __ shrl(rbx, BytecodePairHistogram::log2_number_of_codes);
1842 1842 __ orl(rbx, ((int)t->bytecode()) << BytecodePairHistogram::log2_number_of_codes);
          __ mov32(ExternalAddress((address) &BytecodePairHistogram::_index), rbx);
1843 1843 ExternalAddress table((address) BytecodePairHistogram::_counters);
1844 1844 Address index(noreg, rbx, Address::times_4);
1845 1845 __ incrementl(ArrayAddress(table, index));
1846 1846 }
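
The index update above maintains a sliding window of the last two bytecodes.
A hedged C++ rendering (names mirror BytecodePairHistogram; illustration only):

    // the previous code moves to the low bits, the new code takes the high bits
    static int update_pair_index(int index, int code, int log2_codes) {
      return (index >> log2_codes) | (code << log2_codes);
    }
    // the caller then increments _counters[index], as the generated code does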
1847 1847
1848 1848
1849 1849 void TemplateInterpreterGenerator::trace_bytecode(Template* t) {
1850 1850 // Call a little run-time stub to avoid blow-up for each bytecode.
1851 1851   // The run-time stub saves the right registers, depending on
1852 1852 // the tosca in-state for the given template.
1853 1853 assert(Interpreter::trace_code(t->tos_in()) != NULL,
1854 1854 "entry must have been generated");
1855 1855 __ call(RuntimeAddress(Interpreter::trace_code(t->tos_in())));
1856 1856 }
1857 1857
1858 1858
1859 1859 void TemplateInterpreterGenerator::stop_interpreter_at() {
1860 1860 Label L;
1861 1861 __ cmp32(ExternalAddress((address) &BytecodeCounter::_counter_value),
1862 1862 StopInterpreterAt);
1863 1863 __ jcc(Assembler::notEqual, L);
1864 1864 __ int3();
1865 1865 __ bind(L);
1866 1866 }
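
In effect, the generated check is equivalent to the following C++ (editor's
paraphrase; BytecodeCounter::counter_value() and os::breakpoint() are assumed
to be the usual HotSpot helpers):

    if (BytecodeCounter::counter_value() == StopInterpreterAt) {
      os::breakpoint();   // the int3 above: trap into an attached debugger
    }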
1867 1867 #endif // !PRODUCT
1868 1868 #endif // CC_INTERP