--- old/src/cpu/x86/vm/templateInterpreter_x86_32.cpp
+++ new/src/cpu/x86/vm/templateInterpreter_x86_32.cpp
1 1 /*
2 2 * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
3 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 4 *
5 5 * This code is free software; you can redistribute it and/or modify it
6 6 * under the terms of the GNU General Public License version 2 only, as
7 7 * published by the Free Software Foundation.
8 8 *
9 9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 12 * version 2 for more details (a copy is included in the LICENSE file that
13 13 * accompanied this code).
14 14 *
15 15 * You should have received a copy of the GNU General Public License version
16 16 * 2 along with this work; if not, write to the Free Software Foundation,
17 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 18 *
19 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 20 * or visit www.oracle.com if you need additional information or have any
21 21 * questions.
22 22 *
23 23 */
24 24
25 25 #include "precompiled.hpp"
26 26 #include "asm/assembler.hpp"
27 27 #include "interpreter/bytecodeHistogram.hpp"
28 28 #include "interpreter/interpreter.hpp"
29 29 #include "interpreter/interpreterGenerator.hpp"
30 30 #include "interpreter/interpreterRuntime.hpp"
31 31 #include "interpreter/templateTable.hpp"
32 32 #include "oops/arrayOop.hpp"
33 33 #include "oops/methodDataOop.hpp"
34 34 #include "oops/methodOop.hpp"
35 35 #include "oops/oop.inline.hpp"
36 36 #include "prims/jvmtiExport.hpp"
37 37 #include "prims/jvmtiThreadState.hpp"
38 38 #include "runtime/arguments.hpp"
39 39 #include "runtime/deoptimization.hpp"
40 40 #include "runtime/frame.inline.hpp"
41 41 #include "runtime/sharedRuntime.hpp"
42 42 #include "runtime/stubRoutines.hpp"
43 43 #include "runtime/synchronizer.hpp"
44 44 #include "runtime/timer.hpp"
45 45 #include "runtime/vframeArray.hpp"
46 46 #include "utilities/debug.hpp"
47 47
48 48 #define __ _masm->
49 49
50 50
51 51 #ifndef CC_INTERP
52 52 const int method_offset = frame::interpreter_frame_method_offset * wordSize;
53 53 const int bci_offset = frame::interpreter_frame_bcx_offset * wordSize;
54 54 const int locals_offset = frame::interpreter_frame_locals_offset * wordSize;
55 55
56 56 //------------------------------------------------------------------------------------------------------------------------
57 57
58 58 address TemplateInterpreterGenerator::generate_StackOverflowError_handler() {
59 59 address entry = __ pc();
60 60
61 61 // Note: There should be a minimal interpreter frame set up when stack
62 62 // overflow occurs since we check explicitly for it now.
63 63 //
64 64 #ifdef ASSERT
65 65 { Label L;
66 66 __ lea(rax, Address(rbp,
67 67 frame::interpreter_frame_monitor_block_top_offset * wordSize));
68 68 __ cmpptr(rax, rsp); // rax, = maximal rsp for current rbp,
69 69 // (stack grows negative)
70 70 __ jcc(Assembler::aboveEqual, L); // check if frame is complete
71 71 __ stop ("interpreter frame not set up");
72 72 __ bind(L);
73 73 }
74 74 #endif // ASSERT
75 75 // Restore bcp under the assumption that the current frame is still
76 76 // interpreted
77 77 __ restore_bcp();
78 78
79 79 // expression stack must be empty before entering the VM if an exception
80 80 // happened
81 81 __ empty_expression_stack();
82 82 __ empty_FPU_stack();
83 83 // throw exception
84 84 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_StackOverflowError));
85 85 return entry;
86 86 }
87 87
88 88 address TemplateInterpreterGenerator::generate_ArrayIndexOutOfBounds_handler(const char* name) {
89 89 address entry = __ pc();
90 90 // expression stack must be empty before entering the VM if an exception happened
91 91 __ empty_expression_stack();
92 92 __ empty_FPU_stack();
93 93 // setup parameters
94 94 // ??? convention: expect aberrant index in register rbx,
95 95 __ lea(rax, ExternalAddress((address)name));
96 96 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_ArrayIndexOutOfBoundsException), rax, rbx);
97 97 return entry;
98 98 }
99 99
100 100 address TemplateInterpreterGenerator::generate_ClassCastException_handler() {
101 101 address entry = __ pc();
102 102 // object is at TOS
103 103 __ pop(rax);
104 104 // expression stack must be empty before entering the VM if an exception
105 105 // happened
106 106 __ empty_expression_stack();
107 107 __ empty_FPU_stack();
108 108 __ call_VM(noreg,
109 109 CAST_FROM_FN_PTR(address,
110 110 InterpreterRuntime::throw_ClassCastException),
111 111 rax);
112 112 return entry;
113 113 }
114 114
115 115 address TemplateInterpreterGenerator::generate_exception_handler_common(const char* name, const char* message, bool pass_oop) {
116 116 assert(!pass_oop || message == NULL, "either oop or message but not both");
117 117 address entry = __ pc();
118 118 if (pass_oop) {
119 119 // object is at TOS
120 120 __ pop(rbx);
121 121 }
122 122 // expression stack must be empty before entering the VM if an exception happened
123 123 __ empty_expression_stack();
124 124 __ empty_FPU_stack();
125 125 // setup parameters
126 126 __ lea(rax, ExternalAddress((address)name));
127 127 if (pass_oop) {
128 128 __ call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::create_klass_exception), rax, rbx);
129 129 } else {
130 130 if (message != NULL) {
131 131 __ lea(rbx, ExternalAddress((address)message));
132 132 } else {
133 133 __ movptr(rbx, NULL_WORD);
134 134 }
135 135 __ call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::create_exception), rax, rbx);
136 136 }
137 137 // throw exception
138 138 __ jump(ExternalAddress(Interpreter::throw_exception_entry()));
139 139 return entry;
140 140 }
141 141
142 142
143 143 address TemplateInterpreterGenerator::generate_continuation_for(TosState state) {
144 144 address entry = __ pc();
145 145 // NULL last_sp until next java call
146 146 __ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), NULL_WORD);
147 147 __ dispatch_next(state);
148 148 return entry;
149 149 }
150 150
151 151
152 152 address TemplateInterpreterGenerator::generate_return_entry_for(TosState state, int step) {
153 153 TosState incoming_state = state;
154 154 address entry = __ pc();
155 155
156 156 #ifdef COMPILER2
157 157 // The FPU stack is clean if UseSSE >= 2 but must be cleaned in other cases
158 158 if ((incoming_state == ftos && UseSSE < 1) || (incoming_state == dtos && UseSSE < 2)) {
159 159 for (int i = 1; i < 8; i++) {
160 160 __ ffree(i);
161 161 }
162 162 } else if (UseSSE < 2) {
163 163 __ empty_FPU_stack();
164 164 }
165 165 #endif
166 166 if ((incoming_state == ftos && UseSSE < 1) || (incoming_state == dtos && UseSSE < 2)) {
167 167 __ MacroAssembler::verify_FPU(1, "generate_return_entry_for compiled");
168 168 } else {
169 169 __ MacroAssembler::verify_FPU(0, "generate_return_entry_for compiled");
170 170 }
171 171
172 172 // In SSE mode, interpreter returns FP results in xmm0 but they need
173 173 // to end up back on the FPU so it can operate on them.
174 174 if (incoming_state == ftos && UseSSE >= 1) {
175 175 __ subptr(rsp, wordSize);
176 176 __ movflt(Address(rsp, 0), xmm0);
177 177 __ fld_s(Address(rsp, 0));
178 178 __ addptr(rsp, wordSize);
179 179 } else if (incoming_state == dtos && UseSSE >= 2) {
180 180 __ subptr(rsp, 2*wordSize);
181 181 __ movdbl(Address(rsp, 0), xmm0);
182 182 __ fld_d(Address(rsp, 0));
183 183 __ addptr(rsp, 2*wordSize);
184 184 }
185 185
186 186 __ MacroAssembler::verify_FPU(state == ftos || state == dtos ? 1 : 0, "generate_return_entry_for in interpreter");
187 187
188 188 // Restore stack bottom in case i2c adjusted stack
189 189 __ movptr(rsp, Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize));
190 190 // and NULL it as marker that rsp is now tos until next java call
191 191 __ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), NULL_WORD);
192 192
193 193 __ restore_bcp();
194 194 __ restore_locals();
195 195
196 196 Label L_got_cache, L_giant_index;
197 197 if (EnableInvokeDynamic) {
198 198 __ cmpb(Address(rsi, 0), Bytecodes::_invokedynamic);
199 199 __ jcc(Assembler::equal, L_giant_index);
200 200 }
201 201 __ get_cache_and_index_at_bcp(rbx, rcx, 1, sizeof(u2));
202 202 __ bind(L_got_cache);
203 203 __ movl(rbx, Address(rbx, rcx,
204 204 Address::times_ptr, constantPoolCacheOopDesc::base_offset() +
205 205 ConstantPoolCacheEntry::flags_offset()));
206 206 __ andptr(rbx, 0xFF);
207 207 __ lea(rsp, Address(rsp, rbx, Interpreter::stackElementScale()));
208 208 __ dispatch_next(state, step);
209 209
210 210 // out of the main line of code...
211 211 if (EnableInvokeDynamic) {
212 212 __ bind(L_giant_index);
213 213 __ get_cache_and_index_at_bcp(rbx, rcx, 1, sizeof(u4));
214 214 __ jmp(L_got_cache);
215 215 }
216 216
217 217 return entry;
218 218 }
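
The andptr(rbx, 0xFF) / lea(rsp, ...) pair above is the interpreter popping the callee's arguments: in this VM generation the low byte of a ConstantPoolCacheEntry's flags word holds the invoked method's parameter size in stack elements. A minimal C++ sketch of that adjustment (standalone names, not HotSpot API):

    #include <cstdint>

    // Pop a callee's arguments after it returns: the low 8 bits of the
    // cp cache entry's flags word give the parameter size in stack elements.
    intptr_t* adjust_sp_after_return(intptr_t* sp, uint32_t flags_word) {
      uint32_t parameter_size = flags_word & 0xFF;   // andptr(rbx, 0xFF)
      return sp + parameter_size;                    // lea(rsp, Address(rsp, rbx, stackElementScale))
    }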
219 219
220 220
221 221 address TemplateInterpreterGenerator::generate_deopt_entry_for(TosState state, int step) {
222 222 address entry = __ pc();
223 223
224 224 // In SSE mode, FP results are in xmm0
225 225 if (state == ftos && UseSSE > 0) {
226 226 __ subptr(rsp, wordSize);
227 227 __ movflt(Address(rsp, 0), xmm0);
228 228 __ fld_s(Address(rsp, 0));
229 229 __ addptr(rsp, wordSize);
230 230 } else if (state == dtos && UseSSE >= 2) {
231 231 __ subptr(rsp, 2*wordSize);
232 232 __ movdbl(Address(rsp, 0), xmm0);
233 233 __ fld_d(Address(rsp, 0));
234 234 __ addptr(rsp, 2*wordSize);
235 235 }
236 236
237 237 __ MacroAssembler::verify_FPU(state == ftos || state == dtos ? 1 : 0, "generate_deopt_entry_for in interpreter");
238 238
239 239 // The stack is not extended by deopt but we must NULL last_sp as this
240 240 // entry is like a "return".
241 241 __ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), NULL_WORD);
242 242 __ restore_bcp();
243 243 __ restore_locals();
244 244 // handle exceptions
245 245 { Label L;
246 246 const Register thread = rcx;
247 247 __ get_thread(thread);
248 248 __ cmpptr(Address(thread, Thread::pending_exception_offset()), (int32_t)NULL_WORD);
249 249 __ jcc(Assembler::zero, L);
250 250 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_pending_exception));
251 251 __ should_not_reach_here();
252 252 __ bind(L);
253 253 }
254 254 __ dispatch_next(state, step);
255 255 return entry;
256 256 }
257 257
258 258
259 259 int AbstractInterpreter::BasicType_as_index(BasicType type) {
260 260 int i = 0;
261 261 switch (type) {
262 262 case T_BOOLEAN: i = 0; break;
263 263 case T_CHAR : i = 1; break;
264 264 case T_BYTE : i = 2; break;
265 265 case T_SHORT : i = 3; break;
266 266 case T_INT : // fall through
267 267 case T_LONG : // fall through
268 268 case T_VOID : i = 4; break;
269 269 case T_FLOAT : i = 5; break; // have to treat float and double separately for SSE
270 270 case T_DOUBLE : i = 6; break;
271 271 case T_OBJECT : // fall through
272 272 case T_ARRAY : i = 7; break;
273 273 default : ShouldNotReachHere();
274 274 }
275 275 assert(0 <= i && i < AbstractInterpreter::number_of_result_handlers, "index out of bounds");
276 276 return i;
277 277 }
278 278
279 279
280 280 address TemplateInterpreterGenerator::generate_result_handler_for(BasicType type) {
281 281 address entry = __ pc();
282 282 switch (type) {
283 283 case T_BOOLEAN: __ c2bool(rax); break;
284 284 case T_CHAR : __ andptr(rax, 0xFFFF); break;
285 285 case T_BYTE : __ sign_extend_byte (rax); break;
286 286 case T_SHORT : __ sign_extend_short(rax); break;
287 287 case T_INT : /* nothing to do */ break;
288 288 case T_DOUBLE :
289 289 case T_FLOAT :
290 290 { const Register t = InterpreterRuntime::SignatureHandlerGenerator::temp();
291 291 __ pop(t); // remove return address first
292 292 // Must return a result for interpreter or compiler. In SSE
293 293 // mode, results are returned in xmm0 and the FPU stack must
294 294 // be empty.
295 295 if (type == T_FLOAT && UseSSE >= 1) {
296 296 // Load ST0
297 297 __ fld_d(Address(rsp, 0));
298 298 // Store as float and empty fpu stack
299 299 __ fstp_s(Address(rsp, 0));
300 300 // and reload
301 301 __ movflt(xmm0, Address(rsp, 0));
302 302 } else if (type == T_DOUBLE && UseSSE >= 2 ) {
303 303 __ movdbl(xmm0, Address(rsp, 0));
304 304 } else {
305 305 // restore ST0
306 306 __ fld_d(Address(rsp, 0));
307 307 }
308 308 // and pop the temp
309 309 __ addptr(rsp, 2 * wordSize);
310 310 __ push(t); // restore return address
311 311 }
312 312 break;
313 313 case T_OBJECT :
314 314 // retrieve result from frame
315 315 __ movptr(rax, Address(rbp, frame::interpreter_frame_oop_temp_offset*wordSize));
316 316 // and verify it
317 317 __ verify_oop(rax);
318 318 break;
319 319 default : ShouldNotReachHere();
320 320 }
321 321 __ ret(0); // return from result handler
322 322 return entry;
323 323 }
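
For T_BOOLEAN, c2bool canonicalizes the C convention "any non-zero low byte is true" into exactly 0 or 1. A standalone sketch of that semantic (assuming c2bool masks to the low byte and then does a setne, as the x86 macro assembler does):

    #include <cstdio>

    // Canonicalize a C-style boolean return: only the low byte counts,
    // and any non-zero value becomes exactly 1.
    int c2bool(int raw) { return (raw & 0xFF) != 0; }

    int main() {
      printf("%d %d %d\n", c2bool(0), c2bool(2), c2bool(256));  // prints: 0 1 0
    }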
324 324
325 325 address TemplateInterpreterGenerator::generate_safept_entry_for(TosState state, address runtime_entry) {
326 326 address entry = __ pc();
327 327 __ push(state);
328 328 __ call_VM(noreg, runtime_entry);
329 329 __ dispatch_via(vtos, Interpreter::_normal_table.table_for(vtos));
330 330 return entry;
331 331 }
332 332
333 333
334 334 // Helpers for commoning out cases in the various type of method entries.
335 335 //
336 336
337 337 // increment invocation count & check for overflow
338 338 //
339 339 // Note: checking for negative value instead of overflow
340 340 // so we have a 'sticky' overflow test
341 341 //
342 342 // rbx,: method
343 343 // rcx: invocation counter
344 344 //
345 345 void InterpreterGenerator::generate_counter_incr(Label* overflow, Label* profile_method, Label* profile_method_continue) {
346 346 const Address invocation_counter(rbx, in_bytes(methodOopDesc::invocation_counter_offset()) +
347 347 in_bytes(InvocationCounter::counter_offset()));
348 348 // Note: In tiered we increment either counters in methodOop or in MDO depending on whether we're profiling or not.
349 349 if (TieredCompilation) {
350 350 int increment = InvocationCounter::count_increment;
351 351 int mask = ((1 << Tier0InvokeNotifyFreqLog) - 1) << InvocationCounter::count_shift;
352 352 Label no_mdo, done;
353 353 if (ProfileInterpreter) {
354 354 // Are we profiling?
355 355 __ movptr(rax, Address(rbx, methodOopDesc::method_data_offset()));
356 356 __ testptr(rax, rax);
357 357 __ jccb(Assembler::zero, no_mdo);
358 358 // Increment counter in the MDO
359 359 const Address mdo_invocation_counter(rax, in_bytes(methodDataOopDesc::invocation_counter_offset()) +
360 360 in_bytes(InvocationCounter::counter_offset()));
361 361 __ increment_mask_and_jump(mdo_invocation_counter, increment, mask, rcx, false, Assembler::zero, overflow);
362 362 __ jmpb(done);
363 363 }
364 364 __ bind(no_mdo);
365 365 // Increment counter in methodOop (we don't need to load it, it's in rcx).
366 366 __ increment_mask_and_jump(invocation_counter, increment, mask, rcx, true, Assembler::zero, overflow);
367 367 __ bind(done);
368 368 } else {
369 369 const Address backedge_counter (rbx, methodOopDesc::backedge_counter_offset() +
370 370 InvocationCounter::counter_offset());
371 371
372 372 if (ProfileInterpreter) { // %%% Merge this into methodDataOop
373 373 __ incrementl(Address(rbx,methodOopDesc::interpreter_invocation_counter_offset()));
374 374 }
375 375 // Update standard invocation counters
376 376 __ movl(rax, backedge_counter); // load backedge counter
377 377
378 378 __ incrementl(rcx, InvocationCounter::count_increment);
379 379 __ andl(rax, InvocationCounter::count_mask_value); // mask out the status bits
380 380
381 381 __ movl(invocation_counter, rcx); // save invocation count
382 382 __ addl(rcx, rax); // add both counters
383 383
384 384 // profile_method is non-NULL only for interpreted methods, so
385 385 // profile_method != NULL implies !native_call. The BytecodeInterpreter
386 386 // only calls this for native methods, so that code is elided.
387 387
388 388 if (ProfileInterpreter && profile_method != NULL) {
389 389 // Test to see if we should create a method data oop
390 390 __ cmp32(rcx,
391 391 ExternalAddress((address)&InvocationCounter::InterpreterProfileLimit));
392 392 __ jcc(Assembler::less, *profile_method_continue);
393 393
394 394 // if no method data exists, go to profile_method
395 395 __ test_method_data_pointer(rax, *profile_method);
396 396 }
397 397
398 398 __ cmp32(rcx,
399 399 ExternalAddress((address)&InvocationCounter::InterpreterInvocationLimit));
400 400 __ jcc(Assembler::aboveEqual, *overflow);
401 401 }
402 402 }
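
The "sticky" overflow note above works because the invocation count lives in the upper bits of the counter word, above a few status bits; masking the status bits out before the compare means the test keeps firing once the limit has been crossed. A self-contained sketch (the 3-bit shift is an assumption standing in for InvocationCounter::count_shift):

    #include <cstdint>
    #include <cstdio>

    int main() {
      const int      count_shift     = 3;                     // assumed width of the status bits
      const uint32_t count_increment = 1u << count_shift;
      const uint32_t limit           = 10000u << count_shift; // InterpreterInvocationLimit analogue

      uint32_t counter = 0;
      for (int i = 0; i <= 10000; i++) counter += count_increment;

      // Mask out the status bits, as the generated code does with count_mask_value.
      uint32_t masked = counter & ~(count_increment - 1);
      printf("overflow: %s\n", masked >= limit ? "yes" : "no");  // "yes", and it stays that way
    }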
403 403
404 404 void InterpreterGenerator::generate_counter_overflow(Label* do_continue) {
405 405
406 406 // Asm interpreter on entry
407 407 // rdi - locals
408 408 // rsi - bcp
409 409 // rbx, - method
410 410 // rdx - cpool
411 411 // rbp, - interpreter frame
412 412
413 413 // C++ interpreter on entry
414 414 // rsi - new interpreter state pointer
415 415 // rbp - interpreter frame pointer
416 416 // rbx - method
417 417
418 418 // On return (i.e. jump to entry_point) [ back to invocation of interpreter ]
419 419 // rbx, - method
420 420 // rcx - rcvr (assuming there is one)
421 421 // top of stack return address of interpreter caller
422 422 // rsp - sender_sp
423 423
424 424 // C++ interpreter only
425 425 // rsi - previous interpreter state pointer
426 426
427 427 const Address size_of_parameters(rbx, methodOopDesc::size_of_parameters_offset());
428 428
429 429 // InterpreterRuntime::frequency_counter_overflow takes one argument
430 430 // indicating if the counter overflow occurs at a backwards branch (non-NULL bcp).
431 431 // The call returns the address of the verified entry point for the method or NULL
432 432 // if the compilation did not complete (either went background or bailed out).
433 433 __ movptr(rax, (intptr_t)false);
434 434 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::frequency_counter_overflow), rax);
435 435
436 436 __ movptr(rbx, Address(rbp, method_offset)); // restore methodOop
437 437
438 438 // Preserve invariant that rsi/rdi contain bcp/locals of sender frame
439 439 // and jump to the interpreted entry.
440 440 __ jmp(*do_continue, relocInfo::none);
441 441
442 442 }
443 443
444 444 void InterpreterGenerator::generate_stack_overflow_check(void) {
445 445 // see if we've got enough room on the stack for locals plus overhead.
446 446 // the expression stack grows down incrementally, so the normal guard
447 447 // page mechanism will work for that.
448 448 //
449 449 // Registers live on entry:
450 450 //
451 451 // Asm interpreter
452 452 // rdx: number of additional locals this frame needs (what we must check)
453 453 // rbx,: methodOop
454 454
455 455 // destroyed on exit
456 456 // rax,
457 457
458 458 // NOTE: the additional locals are also always pushed (this wasn't obvious in
459 459 // generate_method_entry), so the guard should work for them too.
460 460 //
461 461
462 462 // monitor entry size: see picture of stack set (generate_method_entry) and frame_x86.hpp
463 463 const int entry_size = frame::interpreter_frame_monitor_size() * wordSize;
464 464
465 465 // total overhead size: entry_size + (saved rbp, thru expr stack bottom).
466 466 // be sure to change this if you add/subtract anything to/from the overhead area
467 467 const int overhead_size = -(frame::interpreter_frame_initial_sp_offset*wordSize) + entry_size;
468 468
469 469 const int page_size = os::vm_page_size();
470 470
471 471 Label after_frame_check;
472 472
473 473 // see if the frame is greater than one page in size. If so,
474 474 // then we need to verify there is enough stack space remaining
475 475 // for the additional locals.
476 476 __ cmpl(rdx, (page_size - overhead_size)/Interpreter::stackElementSize);
477 477 __ jcc(Assembler::belowEqual, after_frame_check);
478 478
479 479 // compute rsp as if this were going to be the last frame on
480 480 // the stack before the red zone
481 481
482 482 Label after_frame_check_pop;
483 483
484 484 __ push(rsi);
485 485
486 486 const Register thread = rsi;
487 487
488 488 __ get_thread(thread);
489 489
490 490 const Address stack_base(thread, Thread::stack_base_offset());
491 491 const Address stack_size(thread, Thread::stack_size_offset());
492 492
493 493 // locals + overhead, in bytes
494 494 __ lea(rax, Address(noreg, rdx, Interpreter::stackElementScale(), overhead_size));
495 495
496 496 #ifdef ASSERT
497 497 Label stack_base_okay, stack_size_okay;
498 498 // verify that thread stack base is non-zero
499 499 __ cmpptr(stack_base, (int32_t)NULL_WORD);
500 500 __ jcc(Assembler::notEqual, stack_base_okay);
501 501 __ stop("stack base is zero");
502 502 __ bind(stack_base_okay);
503 503 // verify that thread stack size is non-zero
504 504 __ cmpptr(stack_size, 0);
505 505 __ jcc(Assembler::notEqual, stack_size_okay);
506 506 __ stop("stack size is zero");
507 507 __ bind(stack_size_okay);
508 508 #endif
509 509
510 510 // Add stack base to locals and subtract stack size
511 511 __ addptr(rax, stack_base);
512 512 __ subptr(rax, stack_size);
513 513
514 514 // Use the maximum number of pages we might bang.
515 515 const int max_pages = StackShadowPages > (StackRedPages+StackYellowPages) ? StackShadowPages :
516 516 (StackRedPages+StackYellowPages);
517 517 __ addptr(rax, max_pages * page_size);
518 518
519 519 // check against the current stack bottom
520 520 __ cmpptr(rsp, rax);
521 521 __ jcc(Assembler::above, after_frame_check_pop);
522 522
523 523 __ pop(rsi); // get saved bcp / (c++ prev state ).
524 524
525 525 __ pop(rax); // get return address
526 526 __ jump(ExternalAddress(Interpreter::throw_StackOverflowError_entry()));
527 527
528 528 // all done with frame size check
529 529 __ bind(after_frame_check_pop);
530 530 __ pop(rsi);
531 531
532 532 __ bind(after_frame_check);
533 533 }
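
Restated in plain C, the bound computed into rax above is: the lowest stack address, plus room for every page we might bang, plus this frame's locals and overhead; the frame only fits if rsp is strictly above that. A sketch (names mirror the registers; this is not a HotSpot API):

    #include <cstddef>

    bool frame_fits(char* rsp, char* stack_base, size_t stack_size,
                    size_t locals_plus_overhead, int max_pages, int page_size) {
      char* limit = stack_base - stack_size               // lowest address of the thread stack
                  + (size_t)max_pages * page_size         // shadow/red/yellow pages we may bang
                  + locals_plus_overhead;                 // this frame's demand, from rax above
      return rsp > limit;                                 // jcc(Assembler::above, after_frame_check_pop)
    }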
534 534
535 535 // Allocate monitor and lock method (asm interpreter)
536 536 // rbx, - methodOop
537 537 //
538 538 void InterpreterGenerator::lock_method(void) {
539 539 // synchronize method
540 540 const Address access_flags (rbx, methodOopDesc::access_flags_offset());
541 541 const Address monitor_block_top (rbp, frame::interpreter_frame_monitor_block_top_offset * wordSize);
542 542 const int entry_size = frame::interpreter_frame_monitor_size() * wordSize;
543 543
544 544 #ifdef ASSERT
545 545 { Label L;
546 546 __ movl(rax, access_flags);
547 547 __ testl(rax, JVM_ACC_SYNCHRONIZED);
548 548 __ jcc(Assembler::notZero, L);
549 549 __ stop("method doesn't need synchronization");
550 550 __ bind(L);
551 551 }
552 552 #endif // ASSERT
553 553 // get synchronization object
554 554 { Label done;
555 555 const int mirror_offset = klassOopDesc::klass_part_offset_in_bytes() + Klass::java_mirror_offset_in_bytes();
556 556 __ movl(rax, access_flags);
557 557 __ testl(rax, JVM_ACC_STATIC);
558 558 __ movptr(rax, Address(rdi, Interpreter::local_offset_in_bytes(0))); // get receiver (assume this is frequent case)
559 559 __ jcc(Assembler::zero, done);
560 560 __ movptr(rax, Address(rbx, methodOopDesc::constants_offset()));
561 561 __ movptr(rax, Address(rax, constantPoolOopDesc::pool_holder_offset_in_bytes()));
562 562 __ movptr(rax, Address(rax, mirror_offset));
563 563 __ bind(done);
564 564 }
565 565 // add space for monitor & lock
566 566 __ subptr(rsp, entry_size); // add space for a monitor entry
567 567 __ movptr(monitor_block_top, rsp); // set new monitor block top
568 568 __ movptr(Address(rsp, BasicObjectLock::obj_offset_in_bytes()), rax); // store object
569 569 __ mov(rdx, rsp); // object address
570 570 __ lock_object(rdx);
571 571 }
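
For reference, the slot carved out by subptr(rsp, entry_size) above has the shape of a BasicObjectLock; a sketch of the layout (the authoritative definition lives in runtime/basicLock.hpp):

    #include <cstdint>

    // entry_size above is sizeof(BasicObjectLock) in words for this frame's monitor block.
    struct BasicLock {
      intptr_t displaced_header;   // displaced mark word during fast locking
    };
    struct BasicObjectLock {
      BasicLock lock;
      void*     obj;               // stored above via obj_offset_in_bytes()
    };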
572 572
573 573 //
574 574 // Generate a fixed interpreter frame. This is identical setup for interpreted methods
575 575 // and for native methods hence the shared code.
576 576
577 577 void TemplateInterpreterGenerator::generate_fixed_frame(bool native_call) {
578 578 // initialize fixed part of activation frame
579 579 __ push(rax); // save return address
580 580 __ enter(); // save old & set new rbp,
581 581
582 582
583 583 __ push(rsi); // set sender sp
584 584 __ push((int32_t)NULL_WORD); // leave last_sp as null
585 585 __ movptr(rsi, Address(rbx,methodOopDesc::const_offset())); // get constMethodOop
586 586 __ lea(rsi, Address(rsi,constMethodOopDesc::codes_offset())); // get codebase
587 587 __ push(rbx); // save methodOop
588 588 if (ProfileInterpreter) {
589 589 Label method_data_continue;
590 590 __ movptr(rdx, Address(rbx, in_bytes(methodOopDesc::method_data_offset())));
591 591 __ testptr(rdx, rdx);
592 592 __ jcc(Assembler::zero, method_data_continue);
593 593 __ addptr(rdx, in_bytes(methodDataOopDesc::data_offset()));
594 594 __ bind(method_data_continue);
595 595 __ push(rdx); // set the mdp (method data pointer)
596 596 } else {
597 597 __ push(0);
598 598 }
599 599
600 600 __ movptr(rdx, Address(rbx, methodOopDesc::constants_offset()));
601 601 __ movptr(rdx, Address(rdx, constantPoolOopDesc::cache_offset_in_bytes()));
602 602 __ push(rdx); // set constant pool cache
603 603 __ push(rdi); // set locals pointer
604 604 if (native_call) {
605 605 __ push(0); // no bcp
606 606 } else {
607 607 __ push(rsi); // set bcp
608 608 }
609 609 __ push(0); // reserve word for pointer to expression stack bottom
610 610 __ movptr(Address(rsp, 0), rsp); // set expression stack bottom
611 611 }
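
Reading the pushes back, the fixed frame built above looks like this relative to the new rbp (matching the interpreter_frame_*_offset constants used throughout this file):

    //   rbp + 1*wordSize : return address (pushed before enter())
    //   rbp + 0          : saved caller rbp (enter())
    //   rbp - 1*wordSize : sender sp (rsi)
    //   rbp - 2*wordSize : last_sp, NULL until the next Java call
    //   rbp - 3*wordSize : methodOop (rbx)
    //   rbp - 4*wordSize : mdp, or 0 when not profiling
    //   rbp - 5*wordSize : constant pool cache (rdx)
    //   rbp - 6*wordSize : locals pointer (rdi)
    //   rbp - 7*wordSize : bcp (rsi), or 0 for native calls
    //   rbp - 8*wordSize : expression stack bottom (initially points at itself)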
612 612
613 613 // End of helpers
614 614
615 615 //
616 616 // Various method entries
617 617 //------------------------------------------------------------------------------------------------------------------------
618 618 //
619 619 //
620 620
621 621 // Call an accessor method (assuming it is resolved, otherwise drop into vanilla (slow path) entry)
622 622
623 623 address InterpreterGenerator::generate_accessor_entry(void) {
624 624
625 625 // rbx,: methodOop
626 626 // rcx: receiver (preserve for slow entry into asm interpreter)
627 627
628 628 // rsi: senderSP must be preserved for slow path, set SP to it on fast path
629 629
630 630 address entry_point = __ pc();
631 631 Label xreturn_path;
632 632
633 633 // do fastpath for resolved accessor methods
634 634 if (UseFastAccessorMethods) {
635 635 Label slow_path;
636 636 // If we need a safepoint check, generate full interpreter entry.
637 637 ExternalAddress state(SafepointSynchronize::address_of_state());
638 638 __ cmp32(ExternalAddress(SafepointSynchronize::address_of_state()),
639 639 SafepointSynchronize::_not_synchronized);
640 640
641 641 __ jcc(Assembler::notEqual, slow_path);
642 642 // ASM/C++ Interpreter
643 643 // Code: _aload_0, _(i|a)getfield, _(i|a)return or any rewrites thereof; parameter size = 1
644 644 // Note: We can only use this code if the getfield has been resolved
645 645 // and if we don't have a null-pointer exception => check for
646 646 // these conditions first and use slow path if necessary.
647 647 // rbx,: method
648 648 // rcx: receiver
649 649 __ movptr(rax, Address(rsp, wordSize));
650 650
651 651 // check if local 0 != NULL and read field
652 652 __ testptr(rax, rax);
653 653 __ jcc(Assembler::zero, slow_path);
654 654
655 655 __ movptr(rdi, Address(rbx, methodOopDesc::constants_offset()));
656 656 // read first instruction word and extract bytecode @ 1 and index @ 2
657 657 __ movptr(rdx, Address(rbx, methodOopDesc::const_offset()));
658 658 __ movl(rdx, Address(rdx, constMethodOopDesc::codes_offset()));
659 659 // Shift codes right to get the index on the right.
660 660 // The bytecode fetched looks like <index><0xb4><0x2a>
661 661 __ shrl(rdx, 2*BitsPerByte);
662 662 __ shll(rdx, exact_log2(in_words(ConstantPoolCacheEntry::size())));
663 663 __ movptr(rdi, Address(rdi, constantPoolOopDesc::cache_offset_in_bytes()));
664 664
665 665 // rax,: local 0
666 666 // rbx,: method
667 667 // rcx: receiver - do not destroy since it is needed for slow path!
668 668 // rcx: scratch
669 669 // rdx: constant pool cache index
670 670 // rdi: constant pool cache
671 671 // rsi: sender sp
672 672
673 673 // check if getfield has been resolved and read constant pool cache entry
674 674 // check the validity of the cache entry by testing whether _indices field
675 675 // contains Bytecode::_getfield in b1 byte.
676 676 assert(in_words(ConstantPoolCacheEntry::size()) == 4, "adjust shift below");
677 677 __ movl(rcx,
678 678 Address(rdi,
679 679 rdx,
680 680 Address::times_ptr, constantPoolCacheOopDesc::base_offset() + ConstantPoolCacheEntry::indices_offset()));
681 681 __ shrl(rcx, 2*BitsPerByte);
682 682 __ andl(rcx, 0xFF);
683 683 __ cmpl(rcx, Bytecodes::_getfield);
684 684 __ jcc(Assembler::notEqual, slow_path);
685 685
686 686 // Note: constant pool entry is not valid before bytecode is resolved
687 687 __ movptr(rcx,
688 688 Address(rdi,
689 689 rdx,
690 690 Address::times_ptr, constantPoolCacheOopDesc::base_offset() + ConstantPoolCacheEntry::f2_offset()));
691 691 __ movl(rdx,
692 692 Address(rdi,
693 693 rdx,
694 694 Address::times_ptr, constantPoolCacheOopDesc::base_offset() + ConstantPoolCacheEntry::flags_offset()));
695 695
696 696 Label notByte, notShort, notChar;
697 697 const Address field_address (rax, rcx, Address::times_1);
698 698
699 699 // Need to differentiate between igetfield, agetfield, bgetfield etc.
700 700 // because they are different sizes.
701 701 // Use the type from the constant pool cache
702 702 __ shrl(rdx, ConstantPoolCacheEntry::tosBits);
703 703 // Make sure we don't need to mask rdx for tosBits after the above shift
704 704 ConstantPoolCacheEntry::verify_tosBits();
705 705 __ cmpl(rdx, btos);
706 706 __ jcc(Assembler::notEqual, notByte);
707 707 __ load_signed_byte(rax, field_address);
708 708 __ jmp(xreturn_path);
709 709
710 710 __ bind(notByte);
711 711 __ cmpl(rdx, stos);
712 712 __ jcc(Assembler::notEqual, notShort);
713 713 __ load_signed_short(rax, field_address);
714 714 __ jmp(xreturn_path);
715 715
716 716 __ bind(notShort);
717 717 __ cmpl(rdx, ctos);
718 718 __ jcc(Assembler::notEqual, notChar);
719 719 __ load_unsigned_short(rax, field_address);
720 720 __ jmp(xreturn_path);
721 721
722 722 __ bind(notChar);
723 723 #ifdef ASSERT
724 724 Label okay;
725 725 __ cmpl(rdx, atos);
726 726 __ jcc(Assembler::equal, okay);
727 727 __ cmpl(rdx, itos);
728 728 __ jcc(Assembler::equal, okay);
729 729 __ stop("what type is this?");
730 730 __ bind(okay);
731 731 #endif // ASSERT
732 732 // All the rest are one 32-bit word in size.
733 733 // This is OK for now, since fast accessors should be going away.
734 734 __ movptr(rax, field_address);
735 735
736 736 __ bind(xreturn_path);
737 737
738 738 // _ireturn/_areturn
739 739 __ pop(rdi); // get return address
740 740 __ mov(rsp, rsi); // set sp to sender sp
741 741 __ jmp(rdi);
742 742
743 743 // generate a vanilla interpreter entry as the slow path
744 744 __ bind(slow_path);
745 745
746 746 (void) generate_normal_entry(false);
747 747 return entry_point;
748 748 }
749 749 return NULL;
750 750
751 751 }
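
The <index><0xb4><0x2a> decode above relies on two facts: x86 is little-endian, and after rewriting the getfield operand holds a constant-pool-cache index in native byte order. A sketch of the same extraction (illustrative values):

    #include <cstdint>
    #include <cstdio>
    #include <cstring>

    int main() {
      // rewritten stream: aload_0 (0x2a), getfield (0xb4), cp-cache index 7
      const uint8_t codes[4] = { 0x2a, 0xb4, 0x07, 0x00 };
      uint32_t word;
      memcpy(&word, codes, sizeof(word));        // movl(rdx, codes_offset)
      uint32_t index = word >> 16;               // shrl(rdx, 2*BitsPerByte)
      printf("cp cache index = %u\n", index);    // prints: cp cache index = 7
      // the generated code then scales by shll(exact_log2(ConstantPoolCacheEntry::size()))
      return 0;
    }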
752 752
753 753 // Method entry for java.lang.ref.Reference.get.
754 754 address InterpreterGenerator::generate_Reference_get_entry(void) {
755 755 #ifndef SERIALGC
756 756 // Code: _aload_0, _getfield, _areturn
757 757 // parameter size = 1
758 758 //
759 759 // The code that gets generated by this routine is split into 2 parts:
760 760 // 1. The "intrinsified" code for G1 (or any SATB based GC),
761 761 // 2. The slow path - which is an expansion of the regular method entry.
762 762 //
763 763 // Notes:-
764 764 // * In the G1 code we do not check whether we need to block for
765 765 // a safepoint. If G1 is enabled then we must execute the specialized
766 766 // code for Reference.get (except when the Reference object is null)
767 767 // so that we can log the value in the referent field with an SATB
768 768 // update buffer.
769 769 // If the code for the getfield template is modified so that the
770 770 // G1 pre-barrier code is executed when the current method is
771 771 // Reference.get() then going through the normal method entry
772 772 // will be fine.
773 773 // * The G1 code below can, however, check the receiver object (the instance
774 774 // of java.lang.Reference) and jump to the slow path if null. If the
775 775 // Reference object is null then we obviously cannot fetch the referent
776 776 // and so we don't need to call the G1 pre-barrier. Thus we can use the
777 777 // regular method entry code to generate the NPE.
778 778 //
779 779 // This code is based on generate_accessor_entry.
780 780
781 781 // rbx,: methodOop
782 782 // rcx: receiver (preserve for slow entry into asm interpreter)
783 783
784 784 // rsi: senderSP must be preserved for slow path, set SP to it on fast path
785 785
786 786 address entry = __ pc();
787 787
788 788 const int referent_offset = java_lang_ref_Reference::referent_offset;
789 789 guarantee(referent_offset > 0, "referent offset not initialized");
790 790
791 791 if (UseG1GC) {
792 792 Label slow_path;
793 793
794 794 // Check if local 0 != NULL
795 795 // If the receiver is null then it is OK to jump to the slow path.
796 796 __ movptr(rax, Address(rsp, wordSize));
797 797 __ testptr(rax, rax);
798 798 __ jcc(Assembler::zero, slow_path);
799 799
800 800 // rax: local 0 (must be preserved across the G1 barrier call)
801 801 //
802 802 // rbx: method (at this point it's scratch)
803 803 // rcx: receiver (at this point it's scratch)
804 804 // rdx: scratch
805 805 // rdi: scratch
806 806 //
807 807 // rsi: sender sp
808 808
809 809 // Preserve the sender sp in case the pre-barrier
810 810 // calls the runtime
811 811 __ push(rsi);
812 812
813 813 // Load the value of the referent field.
814 814 const Address field_address(rax, referent_offset);
815 815 __ movptr(rax, field_address);
816 816
817 817 // Generate the G1 pre-barrier code to log the value of
818 818 // the referent field in an SATB buffer.
819 819 __ get_thread(rcx);
820 820 __ g1_write_barrier_pre(noreg /* obj */,
821 821 rax /* pre_val */,
822 822 rcx /* thread */,
823 823 rbx /* tmp */,
824 824 true /* tosca_live */,
825 825 true /* expand_call */);
826 826
827 827 // _areturn
828 828 __ pop(rsi); // get sender sp
829 829 __ pop(rdi); // get return address
830 830 __ mov(rsp, rsi); // set sp to sender sp
831 831 __ jmp(rdi);
832 832
833 833 __ bind(slow_path);
834 834 (void) generate_normal_entry(false);
835 835
836 836 return entry;
837 837 }
838 838 #endif // SERIALGC
839 839
840 840 // If G1 is not enabled then attempt to go through the accessor entry point
841 841 // Reference.get is an accessor
842 842 return generate_accessor_entry();
843 843 }
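
The invariant maintained by the g1_write_barrier_pre call above can be sketched as: while concurrent marking is active, any referent handed to the caller is logged so the marker treats it as live even if the Reference is later cleared. A sketch, not the real G1 barrier code:

    // Illustrative stand-in for the SATB buffer machinery.
    struct SATBQueueSketch {
      void enqueue(void* /*val*/) { /* record for the concurrent marker */ }
    };

    void* reference_get_sketch(void** referent_addr,
                               SATBQueueSketch& satb, bool marking_active) {
      void* referent = *referent_addr;          // movptr(rax, field_address)
      if (marking_active && referent != nullptr) {
        satb.enqueue(referent);                 // g1_write_barrier_pre(pre_val = rax)
      }
      return referent;                          // _areturn
    }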
844 844
845 845 //
846 846 // Interpreter stub for calling a native method. (asm interpreter)
847 847 // This sets up a somewhat different looking stack for calling the native method
848 848 // than the typical interpreter frame setup.
849 849 //
850 850
851 851 address InterpreterGenerator::generate_native_entry(bool synchronized) {
852 852 // determine code generation flags
853 853 bool inc_counter = UseCompiler || CountCompiledCalls;
854 854
855 855 // rbx,: methodOop
856 856 // rsi: sender sp
857 857 // rsi: previous interpreter state (C++ interpreter), must be preserved
858 858 address entry_point = __ pc();
859 859
860 860
861 861 const Address size_of_parameters(rbx, methodOopDesc::size_of_parameters_offset());
862 862 const Address invocation_counter(rbx, methodOopDesc::invocation_counter_offset() + InvocationCounter::counter_offset());
863 863 const Address access_flags (rbx, methodOopDesc::access_flags_offset());
864 864
865 865 // get parameter size (always needed)
866 866 __ load_unsigned_short(rcx, size_of_parameters);
867 867
868 868 // native calls don't need the stack size check since they have no expression stack
869 869 // and the arguments are already on the stack and we only add a handful of words
870 870 // to the stack
871 871
872 872 // rbx,: methodOop
873 873 // rcx: size of parameters
874 874 // rsi: sender sp
875 875
876 876 __ pop(rax); // get return address
877 877 // for natives the size of locals is zero
878 878
879 879 // compute beginning of parameters (rdi)
880 880 __ lea(rdi, Address(rsp, rcx, Interpreter::stackElementScale(), -wordSize));
881 881
882 882
883 883 // add 2 zero-initialized slots for native calls
884 884 // NULL result handler
885 885 __ push((int32_t)NULL_WORD);
886 886 // NULL oop temp (mirror or jni oop result)
887 887 __ push((int32_t)NULL_WORD);
888 888
889 889 if (inc_counter) __ movl(rcx, invocation_counter); // (pre-)fetch invocation count
890 890 // initialize fixed part of activation frame
891 891
892 892 generate_fixed_frame(true);
893 893
894 894 // make sure method is native & not abstract
895 895 #ifdef ASSERT
896 896 __ movl(rax, access_flags);
897 897 {
898 898 Label L;
899 899 __ testl(rax, JVM_ACC_NATIVE);
900 900 __ jcc(Assembler::notZero, L);
901 901 __ stop("tried to execute non-native method as native");
902 902 __ bind(L);
903 903 }
904 904 { Label L;
905 905 __ testl(rax, JVM_ACC_ABSTRACT);
906 906 __ jcc(Assembler::zero, L);
907 907 __ stop("tried to execute abstract method in interpreter");
908 908 __ bind(L);
909 909 }
910 910 #endif
911 911
912 912 // Since at this point in the method invocation the exception handler
913 913 // would try to exit the monitor of a synchronized method which hasn't
914 914 // been entered yet, we set the thread local variable
915 915 // _do_not_unlock_if_synchronized to true. The remove_activation will
916 916 // check this flag.
917 917
918 918 __ get_thread(rax);
919 919 const Address do_not_unlock_if_synchronized(rax,
920 920 in_bytes(JavaThread::do_not_unlock_if_synchronized_offset()));
921 921 __ movbool(do_not_unlock_if_synchronized, true);
922 922
923 923 // increment invocation count & check for overflow
924 924 Label invocation_counter_overflow;
925 925 if (inc_counter) {
926 926 generate_counter_incr(&invocation_counter_overflow, NULL, NULL);
927 927 }
928 928
929 929 Label continue_after_compile;
930 930 __ bind(continue_after_compile);
931 931
932 932 bang_stack_shadow_pages(true);
933 933
934 934 // reset the _do_not_unlock_if_synchronized flag
935 935 __ get_thread(rax);
936 936 __ movbool(do_not_unlock_if_synchronized, false);
937 937
938 938 // check for synchronized methods
939 939 // Must happen AFTER invocation_counter check and stack overflow check,
940 940 // so method is not locked if overflows.
941 941 //
942 942 if (synchronized) {
943 943 lock_method();
944 944 } else {
945 945 // no synchronization necessary
946 946 #ifdef ASSERT
947 947 { Label L;
948 948 __ movl(rax, access_flags);
949 949 __ testl(rax, JVM_ACC_SYNCHRONIZED);
950 950 __ jcc(Assembler::zero, L);
951 951 __ stop("method needs synchronization");
952 952 __ bind(L);
953 953 }
954 954 #endif
955 955 }
956 956
957 957 // start execution
958 958 #ifdef ASSERT
959 959 { Label L;
960 960 const Address monitor_block_top (rbp,
961 961 frame::interpreter_frame_monitor_block_top_offset * wordSize);
962 962 __ movptr(rax, monitor_block_top);
963 963 __ cmpptr(rax, rsp);
964 964 __ jcc(Assembler::equal, L);
965 965 __ stop("broken stack frame setup in interpreter");
966 966 __ bind(L);
967 967 }
968 968 #endif
969 969
970 970 // jvmti/dtrace support
971 971 __ notify_method_entry();
972 972
973 973 // work registers
974 974 const Register method = rbx;
975 975 const Register thread = rdi;
976 976 const Register t = rcx;
977 977
978 978 // allocate space for parameters
979 979 __ get_method(method);
980 980 __ verify_oop(method);
981 981 __ load_unsigned_short(t, Address(method, methodOopDesc::size_of_parameters_offset()));
982 982 __ shlptr(t, Interpreter::logStackElementSize);
983 983 __ addptr(t, 2*wordSize); // allocate two more slots for JNIEnv and possible mirror
984 984 __ subptr(rsp, t);
985 985 __ andptr(rsp, -(StackAlignmentInBytes)); // gcc needs 16 byte aligned stacks to do XMM intrinsics
986 986
987 987 // get signature handler
988 988 { Label L;
989 989 __ movptr(t, Address(method, methodOopDesc::signature_handler_offset()));
990 990 __ testptr(t, t);
991 991 __ jcc(Assembler::notZero, L);
992 992 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::prepare_native_call), method);
993 993 __ get_method(method);
994 994 __ movptr(t, Address(method, methodOopDesc::signature_handler_offset()));
995 995 __ bind(L);
996 996 }
997 997
998 998 // call signature handler
999 999 assert(InterpreterRuntime::SignatureHandlerGenerator::from() == rdi, "adjust this code");
1000 1000 assert(InterpreterRuntime::SignatureHandlerGenerator::to () == rsp, "adjust this code");
1001 1001 assert(InterpreterRuntime::SignatureHandlerGenerator::temp() == t , "adjust this code");
1002 1002 // The generated handlers do not touch RBX (the method oop).
1003 1003 // However, large signatures cannot be cached and are generated
1004 1004 // each time here. The slow-path generator may blow RBX
1005 1005 // at some point, so we must reload it after the call.
1006 1006 __ call(t);
1007 1007 __ get_method(method); // slow path call blows RBX on DevStudio 5.0
1008 1008
1009 1009 // result handler is in rax,
1010 1010 // set result handler
1011 1011 __ movptr(Address(rbp, frame::interpreter_frame_result_handler_offset*wordSize), rax);
1012 1012
1013 1013 // pass mirror handle if static call
1014 1014 { Label L;
1015 1015 const int mirror_offset = klassOopDesc::klass_part_offset_in_bytes() + Klass::java_mirror_offset_in_bytes();
1016 1016 __ movl(t, Address(method, methodOopDesc::access_flags_offset()));
1017 1017 __ testl(t, JVM_ACC_STATIC);
1018 1018 __ jcc(Assembler::zero, L);
1019 1019 // get mirror
1020 1020 __ movptr(t, Address(method, methodOopDesc:: constants_offset()));
1021 1021 __ movptr(t, Address(t, constantPoolOopDesc::pool_holder_offset_in_bytes()));
1022 1022 __ movptr(t, Address(t, mirror_offset));
1023 1023 // copy mirror into activation frame
1024 1024 __ movptr(Address(rbp, frame::interpreter_frame_oop_temp_offset * wordSize), t);
1025 1025 // pass handle to mirror
1026 1026 __ lea(t, Address(rbp, frame::interpreter_frame_oop_temp_offset * wordSize));
1027 1027 __ movptr(Address(rsp, wordSize), t);
1028 1028 __ bind(L);
1029 1029 }
1030 1030
1031 1031 // get native function entry point
1032 1032 { Label L;
1033 1033 __ movptr(rax, Address(method, methodOopDesc::native_function_offset()));
1034 1034 ExternalAddress unsatisfied(SharedRuntime::native_method_throw_unsatisfied_link_error_entry());
1035 1035 __ cmpptr(rax, unsatisfied.addr());
1036 1036 __ jcc(Assembler::notEqual, L);
1037 1037 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::prepare_native_call), method);
1038 1038 __ get_method(method);
1039 1039 __ verify_oop(method);
1040 1040 __ movptr(rax, Address(method, methodOopDesc::native_function_offset()));
1041 1041 __ bind(L);
1042 1042 }
1043 1043
1044 1044 // pass JNIEnv
1045 1045 __ get_thread(thread);
1046 1046 __ lea(t, Address(thread, JavaThread::jni_environment_offset()));
1047 1047 __ movptr(Address(rsp, 0), t);
1048 1048
1049 1049 // set_last_Java_frame_before_call
1050 1050 // It is enough that the pc()
1051 1051 // points into the right code segment. It does not have to be the correct return pc.
1052 1052 __ set_last_Java_frame(thread, noreg, rbp, __ pc());
1053 1053
1054 1054 // change thread state
1055 1055 #ifdef ASSERT
1056 1056 { Label L;
1057 1057 __ movl(t, Address(thread, JavaThread::thread_state_offset()));
1058 1058 __ cmpl(t, _thread_in_Java);
1059 1059 __ jcc(Assembler::equal, L);
1060 1060 __ stop("Wrong thread state in native stub");
1061 1061 __ bind(L);
1062 1062 }
1063 1063 #endif
1064 1064
1065 1065 // Change state to native
1066 1066 __ movl(Address(thread, JavaThread::thread_state_offset()), _thread_in_native);
1067 1067 __ call(rax);
1068 1068
1069 1069 // result potentially in rdx:rax or ST0
1070 1070
1071 1071 // Either restore the MXCSR register after returning from the JNI Call
1072 1072 // or verify that it wasn't changed.
1073 1073 if (VM_Version::supports_sse()) {
1074 1074 if (RestoreMXCSROnJNICalls) {
1075 1075 __ ldmxcsr(ExternalAddress(StubRoutines::addr_mxcsr_std()));
1076 1076 }
1077 1077 else if (CheckJNICalls ) {
1078 1078 __ call(RuntimeAddress(StubRoutines::x86::verify_mxcsr_entry()));
1079 1079 }
1080 1080 }
1081 1081
1082 1082 // Either restore the x87 floating pointer control word after returning
1083 1083 // from the JNI call or verify that it wasn't changed.
1084 1084 if (CheckJNICalls) {
1085 1085 __ call(RuntimeAddress(StubRoutines::x86::verify_fpu_cntrl_wrd_entry()));
1086 1086 }
1087 1087
1088 1088 // save potential result in ST(0) & rdx:rax
1089 1089 // (if result handler is the T_FLOAT or T_DOUBLE handler, result must be in ST0 -
1090 1090 // the check is necessary to avoid potential Intel FPU overflow problems by saving/restoring 'empty' FPU registers)
1091 1091 // It is safe to do this push because state is _thread_in_native and return address will be found
1092 1092 // via _last_native_pc and not via _last_java_sp
1093 1093
1094 1094 // NOTE: the order of these pushes is known to frame::interpreter_frame_result.
1095 1095 // If the order changes or anything else is added to the stack the code in
1096 1096 // interpreter_frame_result will have to be changed.
1097 1097
1098 1098 { Label L;
1099 1099 Label push_double;
1100 1100 ExternalAddress float_handler(AbstractInterpreter::result_handler(T_FLOAT));
1101 1101 ExternalAddress double_handler(AbstractInterpreter::result_handler(T_DOUBLE));
1102 1102 __ cmpptr(Address(rbp, (frame::interpreter_frame_oop_temp_offset + 1)*wordSize),
1103 1103 float_handler.addr());
1104 1104 __ jcc(Assembler::equal, push_double);
1105 1105 __ cmpptr(Address(rbp, (frame::interpreter_frame_oop_temp_offset + 1)*wordSize),
1106 1106 double_handler.addr());
1107 1107 __ jcc(Assembler::notEqual, L);
1108 1108 __ bind(push_double);
1109 1109 __ push(dtos);
1110 1110 __ bind(L);
1111 1111 }
1112 1112 __ push(ltos);
1113 1113
1114 1114 // change thread state
1115 1115 __ get_thread(thread);
1116 1116 __ movl(Address(thread, JavaThread::thread_state_offset()), _thread_in_native_trans);
1117 1117 if(os::is_MP()) {
1118 1118 if (UseMembar) {
1119 1119 // Force this write out before the read below
1120 1120 __ membar(Assembler::Membar_mask_bits(
1121 1121 Assembler::LoadLoad | Assembler::LoadStore |
1122 1122 Assembler::StoreLoad | Assembler::StoreStore));
1123 1123 } else {
1124 1124 // Write serialization page so VM thread can do a pseudo remote membar.
1125 1125 // We use the current thread pointer to calculate a thread specific
1126 1126 // offset to write to within the page. This minimizes bus traffic
1127 1127 // due to cache line collision.
1128 1128 __ serialize_memory(thread, rcx);
1129 1129 }
1130 1130 }
1131 1131
1132 1132 if (AlwaysRestoreFPU) {
1133 1133 // Make sure the control word is correct.
1134 1134 __ fldcw(ExternalAddress(StubRoutines::addr_fpu_cntrl_wrd_std()));
1135 1135 }
1136 1136
1137 1137 // check for safepoint operation in progress and/or pending suspend requests
1138 1138 { Label Continue;
1139 1139
1140 1140 __ cmp32(ExternalAddress(SafepointSynchronize::address_of_state()),
1141 1141 SafepointSynchronize::_not_synchronized);
1142 1142
1143 1143 Label L;
1144 1144 __ jcc(Assembler::notEqual, L);
1145 1145 __ cmpl(Address(thread, JavaThread::suspend_flags_offset()), 0);
1146 1146 __ jcc(Assembler::equal, Continue);
1147 1147 __ bind(L);
1148 1148
1149 1149 // Don't use call_VM as it will see a possible pending exception and forward it
1150 1150 // and never return here preventing us from clearing _last_native_pc down below.
1151 1151 // Also can't use call_VM_leaf either as it will check to see if rsi & rdi are
1152 1152 // preserved and correspond to the bcp/locals pointers. So we do a runtime call
1153 1153 // by hand.
1154 1154 //
1155 1155 __ push(thread);
1156 1156 __ call(RuntimeAddress(CAST_FROM_FN_PTR(address,
1157 1157 JavaThread::check_special_condition_for_native_trans)));
1158 1158 __ increment(rsp, wordSize);
1159 1159 __ get_thread(thread);
1160 1160
1161 1161 __ bind(Continue);
1162 1162 }
1163 1163
1164 1164 // change thread state
1165 1165 __ movl(Address(thread, JavaThread::thread_state_offset()), _thread_in_Java);
1166 1166
1167 1167 __ reset_last_Java_frame(thread, true, true);
1168 1168
1169 1169 // reset handle block
1170 1170 __ movptr(t, Address(thread, JavaThread::active_handles_offset()));
1171 1171 __ movptr(Address(t, JNIHandleBlock::top_offset_in_bytes()), NULL_WORD);
1172 1172
1173 1173 // If result was an oop then unbox and save it in the frame
1174 1174 { Label L;
1175 1175 Label no_oop, store_result;
1176 1176 ExternalAddress handler(AbstractInterpreter::result_handler(T_OBJECT));
1177 1177 __ cmpptr(Address(rbp, frame::interpreter_frame_result_handler_offset*wordSize),
1178 1178 handler.addr());
1179 1179 __ jcc(Assembler::notEqual, no_oop);
1180 1180 __ cmpptr(Address(rsp, 0), (int32_t)NULL_WORD);
1181 1181 __ pop(ltos);
1182 1182 __ testptr(rax, rax);
1183 1183 __ jcc(Assembler::zero, store_result);
1184 1184 // unbox
1185 1185 __ movptr(rax, Address(rax, 0));
1186 1186 __ bind(store_result);
1187 1187 __ movptr(Address(rbp, (frame::interpreter_frame_oop_temp_offset)*wordSize), rax);
1188 1188 // keep stack depth as expected by pushing oop which will eventually be discarded
1189 1189 __ push(ltos);
1190 1190 __ bind(no_oop);
1191 1191 }
1192 1192
1193 1193 {
1194 1194 Label no_reguard;
1195 1195 __ cmpl(Address(thread, JavaThread::stack_guard_state_offset()), JavaThread::stack_guard_yellow_disabled);
1196 1196 __ jcc(Assembler::notEqual, no_reguard);
1197 1197
1198 1198 __ pusha();
1199 1199 __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::reguard_yellow_pages)));
1200 1200 __ popa();
1201 1201
1202 1202 __ bind(no_reguard);
1203 1203 }
1204 1204
1205 1205 // restore rsi to have legal interpreter frame,
1206 1206 // i.e., bci == 0 <=> rsi == code_base()
1207 1207 // Can't call_VM until bcp is within a reasonable range.
1208 1208 __ get_method(method); // method is junk from thread_in_native to now.
1209 1209 __ verify_oop(method);
1210 1210 __ movptr(rsi, Address(method,methodOopDesc::const_offset())); // get constMethodOop
1211 1211 __ lea(rsi, Address(rsi,constMethodOopDesc::codes_offset())); // get codebase
1212 1212
1213 1213 // handle exceptions (exception handling will handle unlocking!)
1214 1214 { Label L;
1215 1215 __ cmpptr(Address(thread, Thread::pending_exception_offset()), (int32_t)NULL_WORD);
1216 1216 __ jcc(Assembler::zero, L);
1217 1217 // Note: At some point we may want to unify this with the code used in call_VM_base();
1218 1218 // i.e., we should use the StubRoutines::forward_exception code. For now this
1219 1219 // doesn't work here because the rsp is not correctly set at this point.
1220 1220 __ MacroAssembler::call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_pending_exception));
1221 1221 __ should_not_reach_here();
1222 1222 __ bind(L);
1223 1223 }
1224 1224
1225 1225 // do unlocking if necessary
1226 1226 { Label L;
1227 1227 __ movl(t, Address(method, methodOopDesc::access_flags_offset()));
1228 1228 __ testl(t, JVM_ACC_SYNCHRONIZED);
1229 1229 __ jcc(Assembler::zero, L);
1230 1230 // the code below should be shared with interpreter macro assembler implementation
1231 1231 { Label unlock;
1232 1232 // BasicObjectLock will be first in list, since this is a synchronized method. However, need
1233 1233 // to check that the object has not been unlocked by an explicit monitorexit bytecode.
1234 1234 const Address monitor(rbp, frame::interpreter_frame_initial_sp_offset * wordSize - (int)sizeof(BasicObjectLock));
1235 1235
1236 1236 __ lea(rdx, monitor); // address of first monitor
1237 1237
1238 1238 __ movptr(t, Address(rdx, BasicObjectLock::obj_offset_in_bytes()));
1239 1239 __ testptr(t, t);
1240 1240 __ jcc(Assembler::notZero, unlock);
1241 1241
1242 1242 // Entry already unlocked, need to throw exception
1243 1243 __ MacroAssembler::call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_illegal_monitor_state_exception));
1244 1244 __ should_not_reach_here();
1245 1245
1246 1246 __ bind(unlock);
1247 1247 __ unlock_object(rdx);
1248 1248 }
1249 1249 __ bind(L);
1250 1250 }
1251 1251
1252 1252 // jvmti/dtrace support
1253 1253 // Note: This must happen _after_ handling/throwing any exceptions since
1254 1254 // the exception handler code notifies the runtime of method exits
1255 1255 // too. If this happens before, method entry/exit notifications are
1256 1256 // not properly paired (was bug - gri 11/22/99).
1257 1257 __ notify_method_exit(vtos, InterpreterMacroAssembler::NotifyJVMTI);
1258 1258
1259 1259 // restore potential result in rdx:rax, call result handler to restore potential result in ST0 & handle result
1260 1260 __ pop(ltos);
1261 1261 __ movptr(t, Address(rbp, frame::interpreter_frame_result_handler_offset*wordSize));
1262 1262 __ call(t);
1263 1263
1264 1264 // remove activation
1265 1265 __ movptr(t, Address(rbp, frame::interpreter_frame_sender_sp_offset * wordSize)); // get sender sp
1266 1266 __ leave(); // remove frame anchor
1267 1267 __ pop(rdi); // get return address
1268 1268 __ mov(rsp, t); // set sp to sender sp
1269 1269 __ jmp(rdi);
1270 1270
1271 1271 if (inc_counter) {
1272 1272 // Handle overflow of counter and compile method
1273 1273 __ bind(invocation_counter_overflow);
1274 1274 generate_counter_overflow(&continue_after_compile);
1275 1275 }
1276 1276
1277 1277 return entry_point;
1278 1278 }
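
One step in the epilogue above is worth restating: a JNI method returns a jobject, which is a pointer to a handle slot rather than the oop itself, so the no_oop block dereferences it (unless NULL) and parks the bare oop in the frame's oop_temp slot where GC can find it. A sketch:

    // Unbox a JNI result handle; a NULL handle yields a NULL oop (illustrative only).
    static inline void* unbox_jni_result(void** jni_handle) {
      if (jni_handle == nullptr) return nullptr;   // testptr(rax, rax); jcc(zero, store_result)
      return *jni_handle;                          // movptr(rax, Address(rax, 0))
    }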
1279 1279
1280 1280 //
1281 1281 // Generic interpreted method entry to (asm) interpreter
1282 1282 //
1283 1283 address InterpreterGenerator::generate_normal_entry(bool synchronized) {
1284 1284 // determine code generation flags
1285 1285 bool inc_counter = UseCompiler || CountCompiledCalls;
1286 1286
1287 1287 // rbx,: methodOop
1288 1288 // rsi: sender sp
1289 1289 address entry_point = __ pc();
1290 1290
1291 1291
1292 1292 const Address size_of_parameters(rbx, methodOopDesc::size_of_parameters_offset());
1293 1293 const Address size_of_locals (rbx, methodOopDesc::size_of_locals_offset());
1294 1294 const Address invocation_counter(rbx, methodOopDesc::invocation_counter_offset() + InvocationCounter::counter_offset());
1295 1295 const Address access_flags (rbx, methodOopDesc::access_flags_offset());
1296 1296
1297 1297 // get parameter size (always needed)
1298 1298 __ load_unsigned_short(rcx, size_of_parameters);
1299 1299
1300 1300 // rbx,: methodOop
1301 1301 // rcx: size of parameters
1302 1302
1303 1303 // rsi: sender_sp (could differ from sp+wordSize if we were called via c2i )
1304 1304
1305 1305 __ load_unsigned_short(rdx, size_of_locals); // get size of locals in words
1306 1306 __ subl(rdx, rcx); // rdx = no. of additional locals
1307 1307
1308 1308 // see if we've got enough room on the stack for locals plus overhead.
1309 1309 generate_stack_overflow_check();
1310 1310
1311 1311 // get return address
1312 1312 __ pop(rax);
1313 1313
1314 1314 // compute beginning of parameters (rdi)
1315 1315 __ lea(rdi, Address(rsp, rcx, Interpreter::stackElementScale(), -wordSize));
1316 1316
1317 1317 // rdx - # of additional locals
1318 1318 // allocate space for locals
1319 1319 // explicitly initialize locals
1320 1320 {
1321 1321 Label exit, loop;
1322 1322 __ testl(rdx, rdx);
1323 1323 __ jcc(Assembler::lessEqual, exit); // do nothing if rdx <= 0
1324 1324 __ bind(loop);
1325 1325 __ push((int32_t)NULL_WORD); // initialize local variables
1326 1326 __ decrement(rdx); // until everything initialized
1327 1327 __ jcc(Assembler::greater, loop);
1328 1328 __ bind(exit);
1329 1329 }
1330 1330
1331 1331 if (inc_counter) __ movl(rcx, invocation_counter); // (pre-)fetch invocation count
1332 1332 // initialize fixed part of activation frame
1333 1333 generate_fixed_frame(false);
1334 1334
1335 1335 // make sure method is not native & not abstract
1336 1336 #ifdef ASSERT
1337 1337 __ movl(rax, access_flags);
1338 1338 {
1339 1339 Label L;
1340 1340 __ testl(rax, JVM_ACC_NATIVE);
1341 1341 __ jcc(Assembler::zero, L);
1342 1342 __ stop("tried to execute native method as non-native");
1343 1343 __ bind(L);
1344 1344 }
1345 1345 { Label L;
1346 1346 __ testl(rax, JVM_ACC_ABSTRACT);
1347 1347 __ jcc(Assembler::zero, L);
1348 1348 __ stop("tried to execute abstract method in interpreter");
1349 1349 __ bind(L);
1350 1350 }
1351 1351 #endif
1352 1352
1353 1353 // Since at this point in the method invocation the exception handler
1354 1354 // would try to exit the monitor of a synchronized method which hasn't
1355 1355 // been entered yet, we set the thread local variable
1356 1356 // _do_not_unlock_if_synchronized to true. The remove_activation will
1357 1357 // check this flag.
1358 1358
1359 1359 __ get_thread(rax);
1360 1360 const Address do_not_unlock_if_synchronized(rax,
1361 1361 in_bytes(JavaThread::do_not_unlock_if_synchronized_offset()));
1362 1362 __ movbool(do_not_unlock_if_synchronized, true);
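            // The flag protocol is, in effect:
            //
            //   thread->_do_not_unlock_if_synchronized = true;
            //   ... code that may throw before the monitor is entered ...
            //   thread->_do_not_unlock_if_synchronized = false;   // cleared below
            //
            // so remove_activation() can tell whether the monitor was actually
            // acquired before it tries to unlock it.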
1363 1363
1364 1364 // increment invocation count & check for overflow
1365 1365 Label invocation_counter_overflow;
1366 1366 Label profile_method;
1367 1367 Label profile_method_continue;
1368 1368 if (inc_counter) {
1369 1369 generate_counter_incr(&invocation_counter_overflow, &profile_method, &profile_method_continue);
1370 1370 if (ProfileInterpreter) {
1371 1371 __ bind(profile_method_continue);
1372 1372 }
1373 1373 }
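            // Roughly, generate_counter_incr() emits (names from InvocationCounter,
            // simplified; the count lives in the upper bits of the counter word):
            //
            //   counter += count_increment;
            //   if ((counter & count_mask_value) >= InterpreterInvocationLimit)
            //     goto invocation_counter_overflow;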
1374 1374 Label continue_after_compile;
1375 1375 __ bind(continue_after_compile);
1376 1376
1377 1377 bang_stack_shadow_pages(false);
1378 1378
1379 1379 // reset the _do_not_unlock_if_synchronized flag
1380 1380 __ get_thread(rax);
1381 1381 __ movbool(do_not_unlock_if_synchronized, false);
1382 1382
1383 1383 // check for synchronized methods
1384 1384   // Must happen AFTER the invocation counter check and stack overflow check,
1385 1385   // so the method is not locked if the counter overflows.
1386 1386 //
1387 1387 if (synchronized) {
1388 1388 // Allocate monitor and lock method
1389 1389 lock_method();
1390 1390 } else {
1391 1391 // no synchronization necessary
1392 1392 #ifdef ASSERT
1393 1393 { Label L;
1394 1394 __ movl(rax, access_flags);
1395 1395 __ testl(rax, JVM_ACC_SYNCHRONIZED);
1396 1396 __ jcc(Assembler::zero, L);
1397 1397 __ stop("method needs synchronization");
1398 1398 __ bind(L);
1399 1399 }
1400 1400 #endif
1401 1401 }
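            // Sketch of what lock_method() does (simplified):
            //
            //   obj = method->is_static() ? holder->java_mirror() : local(0);  // receiver
            //   rsp -= entry_size;                 // add one monitor entry to the frame
            //   monitor->set_obj(obj);
            //   lock_object(monitor);              // fast/slow path monitorenter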
1402 1402
1403 1403 // start execution
1404 1404 #ifdef ASSERT
1405 1405 { Label L;
1406 1406 const Address monitor_block_top (rbp,
1407 1407 frame::interpreter_frame_monitor_block_top_offset * wordSize);
1408 1408 __ movptr(rax, monitor_block_top);
1409 1409 __ cmpptr(rax, rsp);
1410 1410 __ jcc(Assembler::equal, L);
1411 1411 __ stop("broken stack frame setup in interpreter");
1412 1412 __ bind(L);
1413 1413 }
1414 1414 #endif
1415 1415
1416 1416 // jvmti support
1417 1417 __ notify_method_entry();
1418 1418
1419 1419 __ dispatch_next(vtos);
1420 1420
1421 1421 // invocation counter overflow
1422 1422 if (inc_counter) {
1423 1423 if (ProfileInterpreter) {
1424 1424 // We have decided to profile this method in the interpreter
1425 1425 __ bind(profile_method);
1426 1426 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::profile_method));
1427 1427 __ set_method_data_pointer_for_bcp();
1428 1428 __ get_method(rbx);
1429 1429 __ jmp(profile_method_continue);
1430 1430 }
1431 1431 // Handle overflow of counter and compile method
1432 1432 __ bind(invocation_counter_overflow);
1433 1433 generate_counter_overflow(&continue_after_compile);
1434 1434 }
1435 1435
1436 1436 return entry_point;
1437 1437 }
1438 1438
1439 1439 //------------------------------------------------------------------------------------------------------------------------
1440 1440 // Entry points
1441 1441 //
1442 1442 // Here we generate the various kinds of entries into the interpreter.
1443 1443 // The two main entry types are generic bytecode methods and native call methods.
1444 1444 // These both come in synchronized and non-synchronized versions, but the
1445 1445 // frame layout they create is very similar. The other method entry
1446 1446 // types are special-purpose entries that combine entry and
1447 1447 // interpretation all in one. These are for trivial methods like
1448 1448 // accessor, empty, or special math methods.
1449 1449 //
1450 1450 // When control flow reaches any of the entry types for the interpreter,
1451 1451 // the following holds:
1452 1452 //
1453 1453 // Arguments:
1454 1454 //
1455 1455 // rbx,: methodOop
1456 1456 // rcx: receiver
1457 1457 //
1458 1458 //
1459 1459 // Stack layout immediately at entry
1460 1460 //
1461 1461 // [ return address ] <--- rsp
1462 1462 // [ parameter n ]
1463 1463 // ...
1464 1464 // [ parameter 1 ]
1465 1465 // [ expression stack ] (caller's java expression stack)
1466 1466
1467 1467 // Assuming that we don't go to one of the trivial specialized
1468 1468 // entries, the stack will look like below when we are ready to execute
1469 1469 // the first bytecode (or call the native routine). The register usage
1470 1470 // will be as the template-based interpreter expects (see interpreter_x86.hpp).
1471 1471 //
1472 1472 // Local variables follow the incoming parameters immediately (i.e.,
1473 1473 // the return address is moved to the end of the locals).
1474 1474 //
1475 1475 // [ monitor entry ] <--- rsp
1476 1476 // ...
1477 1477 // [ monitor entry ]
1478 1478 // [ expr. stack bottom ]
1479 1479 // [ saved rsi ]
1480 1480 // [ current rdi ]
1481 1481 // [ methodOop ]
1482 1482 // [ saved rbp, ] <--- rbp,
1483 1483 // [ return address ]
1484 1484 // [ local variable m ]
1485 1485 // ...
1486 1486 // [ local variable 1 ]
1487 1487 // [ parameter n ]
1488 1488 // ...
1489 1489 // [ parameter 1 ] <--- rdi
1490 1490
1491 1491 address AbstractInterpreterGenerator::generate_method_entry(AbstractInterpreter::MethodKind kind) {
1492 1492 // determine code generation flags
1493 1493 bool synchronized = false;
1494 1494 address entry_point = NULL;
1495 1495
1496 1496 switch (kind) {
1497 1497 case Interpreter::zerolocals : break;
1498 1498 case Interpreter::zerolocals_synchronized: synchronized = true; break;
1499 1499 case Interpreter::native : entry_point = ((InterpreterGenerator*)this)->generate_native_entry(false); break;
1500 1500 case Interpreter::native_synchronized : entry_point = ((InterpreterGenerator*)this)->generate_native_entry(true); break;
1501 1501 case Interpreter::empty : entry_point = ((InterpreterGenerator*)this)->generate_empty_entry(); break;
1502 1502 case Interpreter::accessor : entry_point = ((InterpreterGenerator*)this)->generate_accessor_entry(); break;
1503 1503 case Interpreter::abstract : entry_point = ((InterpreterGenerator*)this)->generate_abstract_entry(); break;
1504 1504 case Interpreter::method_handle : entry_point = ((InterpreterGenerator*)this)->generate_method_handle_entry(); break;
1505 1505
1506 1506 case Interpreter::java_lang_math_sin : // fall thru
1507 1507 case Interpreter::java_lang_math_cos : // fall thru
1508 1508 case Interpreter::java_lang_math_tan : // fall thru
1509 1509 case Interpreter::java_lang_math_abs : // fall thru
1510 1510 case Interpreter::java_lang_math_log : // fall thru
1511 1511 case Interpreter::java_lang_math_log10 : // fall thru
1512 1512 case Interpreter::java_lang_math_sqrt : entry_point = ((InterpreterGenerator*)this)->generate_math_entry(kind); break;
1513 1513 case Interpreter::java_lang_ref_reference_get
1514 1514 : entry_point = ((InterpreterGenerator*)this)->generate_Reference_get_entry(); break;
1515 1515 default : ShouldNotReachHere(); break;
1516 1516 }
1517 1517
1518 1518 if (entry_point) return entry_point;
1519 1519
1520 1520 return ((InterpreterGenerator*)this)->generate_normal_entry(synchronized);
1521 1521
1522 1522 }
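          // Conceptually, generate_all() uses this routine to populate the method
          // entry table once per kind:
          //
          //   for each MethodKind k:
          //     Interpreter::_entry_table[k] = generate_method_entry(k);
          //
          // so later invocations can index the table by method kind directly.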
1523 1523
1524 1524 // These should never be compiled since the interpreter will prefer
1525 1525 // the compiled version to the intrinsic version.
1526 1526 bool AbstractInterpreter::can_be_compiled(methodHandle m) {
1527 1527 switch (method_kind(m)) {
1528 1528 case Interpreter::java_lang_math_sin : // fall thru
1529 1529 case Interpreter::java_lang_math_cos : // fall thru
1530 1530 case Interpreter::java_lang_math_tan : // fall thru
1531 1531 case Interpreter::java_lang_math_abs : // fall thru
1532 1532 case Interpreter::java_lang_math_log : // fall thru
1533 1533 case Interpreter::java_lang_math_log10 : // fall thru
1534 1534 case Interpreter::java_lang_math_sqrt :
1535 1535 return false;
1536 1536 default:
1537 1537 return true;
1538 1538 }
1539 1539 }
1540 1540
1541 1541 // How much stack a method activation needs in words.
1542 1542 int AbstractInterpreter::size_top_interpreter_activation(methodOop method) {
1543 1543
1544 1544 const int stub_code = 4; // see generate_call_stub
1545 1545 // Save space for one monitor to get into the interpreted method in case
1546 1546 // the method is synchronized
1547 1547 int monitor_size = method->is_synchronized() ?
1548 1548 1*frame::interpreter_frame_monitor_size() : 0;
1549 1549
1550 1550 // total overhead size: entry_size + (saved rbp, thru expr stack bottom).
1551 1551 // be sure to change this if you add/subtract anything to/from the overhead area
1552 1552   const int overhead_size = -frame::interpreter_frame_initial_sp_offset + monitor_size;
1553 1553
1554 1554 const int extra_stack = methodOopDesc::extra_stack_entries();
1555 1555 const int method_stack = (method->max_locals() + method->max_stack() + extra_stack) *
1556 1556 Interpreter::stackElementWords;
1557 1557 return overhead_size + method_stack + stub_code;
1558 1558 }
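          // Worked example (symbolic): a synchronized method with max_locals == 2
          // and max_stack == 3 needs roughly
          //
          //   -interpreter_frame_initial_sp_offset + monitor_size      // overhead
          //   + (2 + 3 + extra_stack_entries()) * stackElementWords    // method stack
          //   + 4                                                      // stub_code
          //
          // words of stack for its activation.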
1559 1559
1560 1560 // asm based interpreter deoptimization helpers
1561 1561
1562 1562 int AbstractInterpreter::layout_activation(methodOop method,
1563 1563 int tempcount,
1564 1564 int popframe_extra_args,
1565 1565 int moncount,
1566 1566 int caller_actual_parameters,
1567 1567 int callee_param_count,
1568 1568 int callee_locals,
1569 1569 frame* caller,
1570 1570 frame* interpreter_frame,
1571 1571 bool is_top_frame) {
1572 1572 // Note: This calculation must exactly parallel the frame setup
1573 1573 // in AbstractInterpreterGenerator::generate_method_entry.
1574 1574 // If interpreter_frame!=NULL, set up the method, locals, and monitors.
1575 1575 // The frame interpreter_frame, if not NULL, is guaranteed to be the right size,
1576 1576 // as determined by a previous call to this method.
1577 1577   // It is also guaranteed to be walkable even though it is in a skeletal state.
1578 1578   // NOTE: the returned size is in words, not bytes.
1579 1579
1580 1580 // fixed size of an interpreter frame:
1581 1581 int max_locals = method->max_locals() * Interpreter::stackElementWords;
1582 1582 int extra_locals = (method->max_locals() - method->size_of_parameters()) *
1583 1583 Interpreter::stackElementWords;
1584 1584
1585 1585 int overhead = frame::sender_sp_offset - frame::interpreter_frame_initial_sp_offset;
1586 1586
1587 1587   // Our locals were accounted for by the caller (or last_frame_adjust on the transition).
1588 1588 // Since the callee parameters already account for the callee's params we only need to account for
1589 1589 // the extra locals.
1590 1590
1591 1591
1592 1592 int size = overhead +
1593 1593 ((callee_locals - callee_param_count)*Interpreter::stackElementWords) +
1594 1594 (moncount*frame::interpreter_frame_monitor_size()) +
1595 1595 tempcount*Interpreter::stackElementWords + popframe_extra_args;
1596 1596
1597 1597 if (interpreter_frame != NULL) {
1598 1598 #ifdef ASSERT
1599 1599 if (!EnableInvokeDynamic)
1600 1600 // @@@ FIXME: Should we correct interpreter_frame_sender_sp in the calling sequences?
1601 1601 // Probably, since deoptimization doesn't work yet.
1602 1602 assert(caller->unextended_sp() == interpreter_frame->interpreter_frame_sender_sp(), "Frame not properly walkable");
1603 1603 assert(caller->sp() == interpreter_frame->sender_sp(), "Frame not properly walkable(2)");
1604 1604 #endif
1605 1605
1606 1606 interpreter_frame->interpreter_frame_set_method(method);
1607 1607 // NOTE the difference in using sender_sp and interpreter_frame_sender_sp
1608 1608 // interpreter_frame_sender_sp is the original sp of the caller (the unextended_sp)
1609 1609 // and sender_sp is fp+8
1610 1610 intptr_t* locals = interpreter_frame->sender_sp() + max_locals - 1;
1611 1611
1612 +#ifdef ASSERT
1613 + if (caller->is_interpreted_frame()) {
1614 + assert(locals < caller->fp() + frame::interpreter_frame_initial_sp_offset, "bad placement");
1615 + }
1616 +#endif
1617 +
1612 1618 interpreter_frame->interpreter_frame_set_locals(locals);
1613 1619 BasicObjectLock* montop = interpreter_frame->interpreter_frame_monitor_begin();
1614 1620 BasicObjectLock* monbot = montop - moncount;
1615 1621 interpreter_frame->interpreter_frame_set_monitor_end(monbot);
1616 1622
1617 1623 // Set last_sp
1618 1624 intptr_t* rsp = (intptr_t*) monbot -
1619 1625 tempcount*Interpreter::stackElementWords -
1620 1626 popframe_extra_args;
1621 1627 interpreter_frame->interpreter_frame_set_last_sp(rsp);
1622 1628
1623 1629 // All frames but the initial (oldest) interpreter frame we fill in have a
1624 1630 // value for sender_sp that allows walking the stack but isn't
1625 1631 // truly correct. Correct the value here.
1626 1632
1627 1633 if (extra_locals != 0 &&
1628 1634 interpreter_frame->sender_sp() == interpreter_frame->interpreter_frame_sender_sp() ) {
1629 1635 interpreter_frame->set_interpreter_frame_sender_sp(caller->sp() + extra_locals);
1630 1636 }
1631 1637 *interpreter_frame->interpreter_frame_cache_addr() =
1632 1638 method->constants()->cache();
1633 1639 }
1634 1640 return size;
1635 1641 }
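          // Sketch of the two-phase protocol described above:
          //
          //   int words = layout_activation(..., NULL /* interpreter_frame */, ...); // size only
          //   ... allocate the skeletal frame ...
          //   layout_activation(..., skeletal_frame, ...);  // fill in method/locals/monitors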
1636 1642
1637 1643
1638 1644 //------------------------------------------------------------------------------------------------------------------------
1639 1645 // Exceptions
1640 1646
1641 1647 void TemplateInterpreterGenerator::generate_throw_exception() {
1642 1648 // Entry point in previous activation (i.e., if the caller was interpreted)
1643 1649 Interpreter::_rethrow_exception_entry = __ pc();
1644 1650 const Register thread = rcx;
1645 1651
1646 1652 // Restore sp to interpreter_frame_last_sp even though we are going
1647 1653 // to empty the expression stack for the exception processing.
1648 1654 __ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), NULL_WORD);
1649 1655 // rax,: exception
1650 1656 // rdx: return address/pc that threw exception
1651 1657 __ restore_bcp(); // rsi points to call/send
1652 1658 __ restore_locals();
1653 1659
1654 1660 // Entry point for exceptions thrown within interpreter code
1655 1661 Interpreter::_throw_exception_entry = __ pc();
1656 1662 // expression stack is undefined here
1657 1663 // rax,: exception
1658 1664 // rsi: exception bcp
1659 1665 __ verify_oop(rax);
1660 1666
1661 1667 // expression stack must be empty before entering the VM in case of an exception
1662 1668 __ empty_expression_stack();
1663 1669 __ empty_FPU_stack();
1664 1670 // find exception handler address and preserve exception oop
1665 1671 __ call_VM(rdx, CAST_FROM_FN_PTR(address, InterpreterRuntime::exception_handler_for_exception), rax);
1666 1672 // rax,: exception handler entry point
1667 1673 // rdx: preserved exception oop
1668 1674 // rsi: bcp for exception handler
1669 1675 __ push_ptr(rdx); // push exception which is now the only value on the stack
1670 1676 __ jmp(rax); // jump to exception handler (may be _remove_activation_entry!)
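            // Conceptually, the VM call above does:
            //
            //   handler_pc = lookup (bci, exception) in the method's exception table;
            //   if (no handler in this frame) handler_pc = _remove_activation_entry;
            //   return handler_pc;            // rax; exception oop preserved in rdx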
1671 1677
1672 1678 // If the exception is not handled in the current frame the frame is removed and
1673 1679 // the exception is rethrown (i.e. exception continuation is _rethrow_exception).
1674 1680 //
1675 1681 // Note: At this point the bci is still the bci for the instruction which caused
1676 1682 // the exception and the expression stack is empty. Thus, for any VM calls
1677 1683 // at this point, GC will find a legal oop map (with empty expression stack).
1678 1684
1679 1685 // In current activation
1680 1686 // tos: exception
1681 1687 // rsi: exception bcp
1682 1688
1683 1689 //
1684 1690 // JVMTI PopFrame support
1685 1691 //
1686 1692
1687 1693 Interpreter::_remove_activation_preserving_args_entry = __ pc();
1688 1694 __ empty_expression_stack();
1689 1695 __ empty_FPU_stack();
1690 1696 // Set the popframe_processing bit in pending_popframe_condition indicating that we are
1691 1697 // currently handling popframe, so that call_VMs that may happen later do not trigger new
1692 1698 // popframe handling cycles.
1693 1699 __ get_thread(thread);
1694 1700 __ movl(rdx, Address(thread, JavaThread::popframe_condition_offset()));
1695 1701 __ orl(rdx, JavaThread::popframe_processing_bit);
1696 1702 __ movl(Address(thread, JavaThread::popframe_condition_offset()), rdx);
1697 1703
1698 1704 {
1699 1705 // Check to see whether we are returning to a deoptimized frame.
1700 1706 // (The PopFrame call ensures that the caller of the popped frame is
1701 1707 // either interpreted or compiled and deoptimizes it if compiled.)
1702 1708 // In this case, we can't call dispatch_next() after the frame is
1703 1709 // popped, but instead must save the incoming arguments and restore
1704 1710 // them after deoptimization has occurred.
1705 1711 //
1706 1712 // Note that we don't compare the return PC against the
1707 1713 // deoptimization blob's unpack entry because of the presence of
1708 1714 // adapter frames in C2.
1709 1715 Label caller_not_deoptimized;
1710 1716 __ movptr(rdx, Address(rbp, frame::return_addr_offset * wordSize));
1711 1717 __ super_call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::interpreter_contains), rdx);
1712 1718 __ testl(rax, rax);
1713 1719 __ jcc(Assembler::notZero, caller_not_deoptimized);
1714 1720
1715 1721 // Compute size of arguments for saving when returning to deoptimized caller
1716 1722 __ get_method(rax);
1717 1723 __ verify_oop(rax);
1718 1724 __ load_unsigned_short(rax, Address(rax, in_bytes(methodOopDesc::size_of_parameters_offset())));
1719 1725 __ shlptr(rax, Interpreter::logStackElementSize);
1720 1726 __ restore_locals();
1721 1727 __ subptr(rdi, rax);
1722 1728 __ addptr(rdi, wordSize);
1723 1729 // Save these arguments
1724 1730 __ get_thread(thread);
1725 1731 __ super_call_VM_leaf(CAST_FROM_FN_PTR(address, Deoptimization::popframe_preserve_args), thread, rax, rdi);
1726 1732
1727 1733 __ remove_activation(vtos, rdx,
1728 1734 /* throw_monitor_exception */ false,
1729 1735 /* install_monitor_exception */ false,
1730 1736 /* notify_jvmdi */ false);
1731 1737
1732 1738 // Inform deoptimization that it is responsible for restoring these arguments
1733 1739 __ get_thread(thread);
1734 1740 __ movl(Address(thread, JavaThread::popframe_condition_offset()), JavaThread::popframe_force_deopt_reexecution_bit);
1735 1741
1736 1742 // Continue in deoptimization handler
1737 1743 __ jmp(rdx);
1738 1744
1739 1745 __ bind(caller_not_deoptimized);
1740 1746 }
1741 1747
1742 1748 __ remove_activation(vtos, rdx,
1743 1749 /* throw_monitor_exception */ false,
1744 1750 /* install_monitor_exception */ false,
1745 1751 /* notify_jvmdi */ false);
1746 1752
1747 1753 // Finish with popframe handling
1748 1754 // A previous I2C followed by a deoptimization might have moved the
1749 1755 // outgoing arguments further up the stack. PopFrame expects the
1750 1756 // mutations to those outgoing arguments to be preserved and other
1751 1757 // constraints basically require this frame to look exactly as
1752 1758 // though it had previously invoked an interpreted activation with
1753 1759 // no space between the top of the expression stack (current
1754 1760 // last_sp) and the top of stack. Rather than force deopt to
1755 1761 // maintain this kind of invariant all the time we call a small
1756 1762 // fixup routine to move the mutated arguments onto the top of our
1757 1763 // expression stack if necessary.
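            // Conceptually, the leaf call below performs
            //
            //   memmove(last_sp /* rbx */, outgoing_args_top /* rax */, arg_size);
            //
            // copying any mutated outgoing arguments back onto the top of the
            // expression stack, so the frame looks as if it had just completed an
            // interpreted call.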
1758 1764 __ mov(rax, rsp);
1759 1765 __ movptr(rbx, Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize));
1760 1766 __ get_thread(thread);
1761 1767 // PC must point into interpreter here
1762 1768 __ set_last_Java_frame(thread, noreg, rbp, __ pc());
1763 1769 __ super_call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::popframe_move_outgoing_args), thread, rax, rbx);
1764 1770 __ get_thread(thread);
1765 1771 __ reset_last_Java_frame(thread, true, true);
1766 1772 // Restore the last_sp and null it out
1767 1773 __ movptr(rsp, Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize));
1768 1774 __ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), NULL_WORD);
1769 1775
1770 1776 __ restore_bcp();
1771 1777 __ restore_locals();
1772 1778 // The method data pointer was incremented already during
1773 1779 // call profiling. We have to restore the mdp for the current bcp.
1774 1780 if (ProfileInterpreter) {
1775 1781 __ set_method_data_pointer_for_bcp();
1776 1782 }
1777 1783
1778 1784 // Clear the popframe condition flag
1779 1785 __ get_thread(thread);
1780 1786 __ movl(Address(thread, JavaThread::popframe_condition_offset()), JavaThread::popframe_inactive);
1781 1787
1782 1788 __ dispatch_next(vtos);
1783 1789 // end of PopFrame support
1784 1790
1785 1791 Interpreter::_remove_activation_entry = __ pc();
1786 1792
1787 1793 // preserve exception over this code sequence
1788 1794 __ pop_ptr(rax);
1789 1795 __ get_thread(thread);
1790 1796 __ movptr(Address(thread, JavaThread::vm_result_offset()), rax);
1791 1797 // remove the activation (without doing throws on illegalMonitorExceptions)
1792 1798 __ remove_activation(vtos, rdx, false, true, false);
1793 1799 // restore exception
1794 1800 __ get_thread(thread);
1795 1801 __ movptr(rax, Address(thread, JavaThread::vm_result_offset()));
1796 1802 __ movptr(Address(thread, JavaThread::vm_result_offset()), NULL_WORD);
1797 1803 __ verify_oop(rax);
1798 1804
1799 1805   // In between activations - previous activation type unknown yet
1800 1806 // compute continuation point - the continuation point expects
1801 1807 // the following registers set up:
1802 1808 //
1803 1809 // rax: exception
1804 1810 // rdx: return address/pc that threw exception
1805 1811 // rsp: expression stack of caller
1806 1812 // rbp: rbp, of caller
1807 1813 __ push(rax); // save exception
1808 1814 __ push(rdx); // save return address
1809 1815 __ super_call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::exception_handler_for_return_address), thread, rdx);
1810 1816 __ mov(rbx, rax); // save exception handler
1811 1817 __ pop(rdx); // restore return address
1812 1818 __ pop(rax); // restore exception
1813 1819 // Note that an "issuing PC" is actually the next PC after the call
1814 1820 __ jmp(rbx); // jump to exception handler of caller
1815 1821 }
1816 1822
1817 1823
1818 1824 //
1819 1825 // JVMTI ForceEarlyReturn support
1820 1826 //
1821 1827 address TemplateInterpreterGenerator::generate_earlyret_entry_for(TosState state) {
1822 1828 address entry = __ pc();
1823 1829 const Register thread = rcx;
1824 1830
1825 1831 __ restore_bcp();
1826 1832 __ restore_locals();
1827 1833 __ empty_expression_stack();
1828 1834 __ empty_FPU_stack();
1829 1835 __ load_earlyret_value(state);
1830 1836
1831 1837 __ get_thread(thread);
1832 1838 __ movptr(rcx, Address(thread, JavaThread::jvmti_thread_state_offset()));
1833 1839 const Address cond_addr(rcx, JvmtiThreadState::earlyret_state_offset());
1834 1840
1835 1841 // Clear the earlyret state
1836 1842 __ movl(cond_addr, JvmtiThreadState::earlyret_inactive);
1837 1843
1838 1844 __ remove_activation(state, rsi,
1839 1845 false, /* throw_monitor_exception */
1840 1846 false, /* install_monitor_exception */
1841 1847 true); /* notify_jvmdi */
1842 1848 __ jmp(rsi);
1843 1849 return entry;
1844 1850 } // end of ForceEarlyReturn support
1845 1851
1846 1852
1847 1853 //------------------------------------------------------------------------------------------------------------------------
1848 1854 // Helper for vtos entry point generation
1849 1855
1850 1856 void TemplateInterpreterGenerator::set_vtos_entry_points (Template* t, address& bep, address& cep, address& sep, address& aep, address& iep, address& lep, address& fep, address& dep, address& vep) {
1851 1857 assert(t->is_valid() && t->tos_in() == vtos, "illegal template");
1852 1858 Label L;
1853 1859 fep = __ pc(); __ push(ftos); __ jmp(L);
1854 1860 dep = __ pc(); __ push(dtos); __ jmp(L);
1855 1861 lep = __ pc(); __ push(ltos); __ jmp(L);
1856 1862 aep = __ pc(); __ push(atos); __ jmp(L);
1857 1863 bep = cep = sep = // fall through
1858 1864 iep = __ pc(); __ push(itos); // fall through
1859 1865 vep = __ pc(); __ bind(L); // fall through
1860 1866 generate_and_dispatch(t);
1861 1867 }
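          // For example, a template that leaves a float in the FPU tos enters at fep,
          // spills it with push(ftos), and joins the common path at L; byte, char,
          // short and int all share iep since their tos value lives in rax.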
1862 1868
1863 1869 //------------------------------------------------------------------------------------------------------------------------
1864 1870 // Generation of individual instructions
1865 1871
1866 1872 // helpers for generate_and_dispatch
1867 1873
1868 1874
1869 1875
1870 1876 InterpreterGenerator::InterpreterGenerator(StubQueue* code)
1871 1877 : TemplateInterpreterGenerator(code) {
1872 1878 generate_all(); // down here so it can be "virtual"
1873 1879 }
1874 1880
1875 1881 //------------------------------------------------------------------------------------------------------------------------
1876 1882
1877 1883 // Non-product code
1878 1884 #ifndef PRODUCT
1879 1885 address TemplateInterpreterGenerator::generate_trace_code(TosState state) {
1880 1886 address entry = __ pc();
1881 1887
1882 1888 // prepare expression stack
1883 1889 __ pop(rcx); // pop return address so expression stack is 'pure'
1884 1890 __ push(state); // save tosca
1885 1891
1886 1892 // pass tosca registers as arguments & call tracer
1887 1893 __ call_VM(noreg, CAST_FROM_FN_PTR(address, SharedRuntime::trace_bytecode), rcx, rax, rdx);
1888 1894 __ mov(rcx, rax); // make sure return address is not destroyed by pop(state)
1889 1895 __ pop(state); // restore tosca
1890 1896
1891 1897 // return
1892 1898 __ jmp(rcx);
1893 1899
1894 1900 return entry;
1895 1901 }
1896 1902
1897 1903
1898 1904 void TemplateInterpreterGenerator::count_bytecode() {
1899 1905 __ incrementl(ExternalAddress((address) &BytecodeCounter::_counter_value));
1900 1906 }
1901 1907
1902 1908
1903 1909 void TemplateInterpreterGenerator::histogram_bytecode(Template* t) {
1904 1910 __ incrementl(ExternalAddress((address) &BytecodeHistogram::_counters[t->bytecode()]));
1905 1911 }
1906 1912
1907 1913
1908 1914 void TemplateInterpreterGenerator::histogram_bytecode_pair(Template* t) {
1909 1915   __ mov32(rbx, ExternalAddress((address) &BytecodePairHistogram::_index));
1910 1916   __ shrl(rbx, BytecodePairHistogram::log2_number_of_codes);
1911 1917   __ orl(rbx, ((int)t->bytecode()) << BytecodePairHistogram::log2_number_of_codes);
            __ mov32(ExternalAddress((address) &BytecodePairHistogram::_index), rbx);
1912 1918 ExternalAddress table((address) BytecodePairHistogram::_counters);
1913 1919 Address index(noreg, rbx, Address::times_4);
1914 1920 __ incrementl(ArrayAddress(table, index));
1915 1921 }
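          // Assuming log2_number_of_codes == 8, the code above maintains
          //
          //   _index = (_index >> 8) | (current_bytecode << 8);
          //
          // so _index always packs the previous and current bytecode into one word,
          // and _counters[_index] counts each ordered bytecode pair.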
1916 1922
1917 1923
1918 1924 void TemplateInterpreterGenerator::trace_bytecode(Template* t) {
1919 1925   // Call a little run-time stub to avoid blow-up for each bytecode.
1920 1926   // The run-time stub saves the right registers, depending on
1921 1927   // the tosca in-state for the given template.
1922 1928 assert(Interpreter::trace_code(t->tos_in()) != NULL,
1923 1929 "entry must have been generated");
1924 1930 __ call(RuntimeAddress(Interpreter::trace_code(t->tos_in())));
1925 1931 }
1926 1932
1927 1933
1928 1934 void TemplateInterpreterGenerator::stop_interpreter_at() {
1929 1935 Label L;
1930 1936 __ cmp32(ExternalAddress((address) &BytecodeCounter::_counter_value),
1931 1937 StopInterpreterAt);
1932 1938 __ jcc(Assembler::notEqual, L);
1933 1939 __ int3();
1934 1940 __ bind(L);
1935 1941 }
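          // In debug builds this can be driven from the command line, e.g.
          //
          //   java -XX:StopInterpreterAt=100000 ...
          //
          // which plants the int3 above once the global bytecode counter (see
          // count_bytecode) reaches the given value.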
1936 1942 #endif // !PRODUCT
1937 1943 #endif // CC_INTERP