Print this page
rev 1022 : 6829192: JSR 292 needs to support 64-bit x86
Summary: changes for method handles and invokedynamic
Reviewed-by: ?, ?
Split |
Close |
Expand all |
Collapse all |
--- old/src/cpu/x86/vm/interp_masm_x86_32.cpp
+++ new/src/cpu/x86/vm/interp_masm_x86_32.cpp
1 1 /*
2 2 * Copyright 1997-2009 Sun Microsystems, Inc. All Rights Reserved.
3 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 4 *
5 5 * This code is free software; you can redistribute it and/or modify it
6 6 * under the terms of the GNU General Public License version 2 only, as
7 7 * published by the Free Software Foundation.
8 8 *
9 9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 12 * version 2 for more details (a copy is included in the LICENSE file that
13 13 * accompanied this code).
14 14 *
15 15 * You should have received a copy of the GNU General Public License version
16 16 * 2 along with this work; if not, write to the Free Software Foundation,
17 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 18 *
19 19 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
20 20 * CA 95054 USA or visit www.sun.com if you need additional information or
21 21 * have any questions.
22 22 *
23 23 */
24 24
25 25 #include "incls/_precompiled.incl"
26 26 #include "incls/_interp_masm_x86_32.cpp.incl"
27 27
28 28
29 29 // Implementation of InterpreterMacroAssembler
30 30 #ifdef CC_INTERP
// CC_INTERP only: load the current methodOop into 'reg' from the
// BytecodeInterpreter state block stored below rbp in the frame.
void InterpreterMacroAssembler::get_method(Register reg) {
  // Fetch the address of the BytecodeInterpreter state (2 words below rbp)...
  movptr(reg, Address(rbp, -(sizeof(BytecodeInterpreter) + 2 * wordSize)));
  // ...then load its _method field.
  movptr(reg, Address(reg, byte_offset_of(BytecodeInterpreter, _method)));
}
35 35 #endif // CC_INTERP
36 36
37 37
38 38 #ifndef CC_INTERP
// Interpreter-specific wrapper for a leaf runtime call (no blocking, no GC).
//   entry_point         - address of the runtime routine to call
//   number_of_arguments - number of stack arguments already pushed
void InterpreterMacroAssembler::call_VM_leaf_base(
  address entry_point,
  int     number_of_arguments
) {
  // interpreter specific
  //
  // Note: No need to save/restore bcp & locals (rsi & rdi) pointer
  //       since these are callee saved registers and no blocking/
  //       GC can happen in leaf calls.
  // Further Note: DO NOT save/restore bcp/locals. If a caller has
  // already saved them so that it can use rsi/rdi as temporaries
  // then a save/restore here will DESTROY the copy the caller
  // saved! There used to be a save_bcp() that only happened in
  // the ASSERT path (no restore_bcp). Which caused bizarre failures
  // when jvm built with ASSERTs.
#ifdef ASSERT
  { Label L;
    // The frame's last_sp slot must be clear; a non-NULL value indicates
    // a pending call state was left behind by a previous call.
    cmpptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), (int32_t)NULL_WORD);
    jcc(Assembler::equal, L);
    stop("InterpreterMacroAssembler::call_VM_leaf_base: last_sp != NULL");
    bind(L);
  }
#endif
  // super call
  MacroAssembler::call_VM_leaf_base(entry_point, number_of_arguments);
  // interpreter specific

  // Used to ASSERT that rsi/rdi were equal to frame's bcp/locals
  // but since they may not have been saved (and we don't want to
  // save them here (see note above) the assert is invalid.
}
70 70
71 71
// Interpreter-specific wrapper for a (possibly blocking) VM call.
// Saves bcp before and restores bcp/locals after the call, since a GC
// triggered by the call may move the method's bytecodes.
void InterpreterMacroAssembler::call_VM_base(
  Register oop_result,
  Register java_thread,
  Register last_java_sp,
  address  entry_point,
  int      number_of_arguments,
  bool     check_exceptions
) {
#ifdef ASSERT
  { Label L;
    // last_sp must be clear on entry (see call_VM_leaf_base).
    cmpptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), (int32_t)NULL_WORD);
    jcc(Assembler::equal, L);
    stop("InterpreterMacroAssembler::call_VM_base: last_sp != NULL");
    bind(L);
  }
#endif /* ASSERT */
  // interpreter specific
  //
  // Note: Could avoid restoring locals ptr (callee saved) - however doesn't
  //       really make a difference for these runtime calls, since they are
  //       slow anyway. Btw., bcp must be saved/restored since it may change
  //       due to GC.
  assert(java_thread == noreg , "not expecting a precomputed java thread");
  save_bcp();
  // super call
  MacroAssembler::call_VM_base(oop_result, java_thread, last_java_sp, entry_point, number_of_arguments, check_exceptions);
  // interpreter specific
  restore_bcp();
  restore_locals();
}
102 102
103 103
// JVMTI PopFrame support: if a popframe is pending (and not already being
// processed), jump to the interpreter's remove_activation_preserving_args
// entry. Clobbers java_thread; reloads it before returning.
void InterpreterMacroAssembler::check_and_handle_popframe(Register java_thread) {
  if (JvmtiExport::can_pop_frame()) {
    Label L;
    // Initiate popframe handling only if it is not already being processed. If the flag
    // has the popframe_processing bit set, it means that this code is called *during* popframe
    // handling - we don't want to reenter.
    Register pop_cond = java_thread;  // Not clear if any other register is available...
    movl(pop_cond, Address(java_thread, JavaThread::popframe_condition_offset()));
    testl(pop_cond, JavaThread::popframe_pending_bit);
    jcc(Assembler::zero, L);
    testl(pop_cond, JavaThread::popframe_processing_bit);
    jcc(Assembler::notZero, L);
    // Call Interpreter::remove_activation_preserving_args_entry() to get the
    // address of the same-named entrypoint in the generated interpreter code.
    call_VM_leaf(CAST_FROM_FN_PTR(address, Interpreter::remove_activation_preserving_args_entry));
    jmp(rax);
    bind(L);
    // java_thread was used as a temp above; reload it.
    get_thread(java_thread);
  }
}
124 124
125 125
126 126 void InterpreterMacroAssembler::load_earlyret_value(TosState state) {
127 127 get_thread(rcx);
128 128 movl(rcx, Address(rcx, JavaThread::jvmti_thread_state_offset()));
129 129 const Address tos_addr (rcx, JvmtiThreadState::earlyret_tos_offset());
130 130 const Address oop_addr (rcx, JvmtiThreadState::earlyret_oop_offset());
131 131 const Address val_addr (rcx, JvmtiThreadState::earlyret_value_offset());
132 132 const Address val_addr1(rcx, JvmtiThreadState::earlyret_value_offset()
133 133 + in_ByteSize(wordSize));
134 134 switch (state) {
135 135 case atos: movptr(rax, oop_addr);
136 136 movptr(oop_addr, NULL_WORD);
137 137 verify_oop(rax, state); break;
138 138 case ltos:
139 139 movl(rdx, val_addr1); // fall through
140 140 case btos: // fall through
141 141 case ctos: // fall through
142 142 case stos: // fall through
143 143 case itos: movl(rax, val_addr); break;
144 144 case ftos: fld_s(val_addr); break;
145 145 case dtos: fld_d(val_addr); break;
146 146 case vtos: /* nothing to do */ break;
147 147 default : ShouldNotReachHere();
148 148 }
149 149 // Clean up tos value in the thread object
150 150 movl(tos_addr, (int32_t) ilgl);
151 151 movptr(val_addr, NULL_WORD);
152 152 NOT_LP64(movptr(val_addr1, NULL_WORD));
153 153 }
154 154
155 155
// JVMTI ForceEarlyReturn support: if an early return is pending (and not
// already being processed), jump to the interpreter's
// remove_activation_early entry. Clobbers java_thread; reloads it before
// returning.
void InterpreterMacroAssembler::check_and_handle_earlyret(Register java_thread) {
  if (JvmtiExport::can_force_early_return()) {
    Label L;
    Register tmp = java_thread;
    movptr(tmp, Address(tmp, JavaThread::jvmti_thread_state_offset()));
    testptr(tmp, tmp);
    jcc(Assembler::zero, L); // if (thread->jvmti_thread_state() == NULL) exit;

    // Initiate earlyret handling only if it is not already being processed.
    // If the flag has the earlyret_processing bit set, it means that this code
    // is called *during* earlyret handling - we don't want to reenter.
    movl(tmp, Address(tmp, JvmtiThreadState::earlyret_state_offset()));
    cmpl(tmp, JvmtiThreadState::earlyret_pending);
    jcc(Assembler::notEqual, L);

    // Call Interpreter::remove_activation_early_entry() to get the address of the
    // same-named entrypoint in the generated interpreter code.
    get_thread(java_thread);
    movptr(tmp, Address(java_thread, JavaThread::jvmti_thread_state_offset()));
    // Pass the stashed earlyret tos as the single argument.
    pushl(Address(tmp, JvmtiThreadState::earlyret_tos_offset()));
    call_VM_leaf(CAST_FROM_FN_PTR(address, Interpreter::remove_activation_early_entry), 1);
    jmp(rax);
    bind(L);
    // java_thread was used as a temp above; reload it.
    get_thread(java_thread);
  }
}
182 182
183 183
// Load the unsigned 2-byte (big-endian) index located at bcp + bcp_offset
// into reg. Implemented as a 4-byte load + bswap + shift; the load may
// read past the index bytes, which is safe within the method's bytecodes.
void InterpreterMacroAssembler::get_unsigned_2_byte_index_at_bcp(Register reg, int bcp_offset) {
  assert(bcp_offset >= 0, "bcp is still pointing to start of bytecode");
  movl(reg, Address(rsi, bcp_offset));  // rsi is bcp on x86_32
  bswapl(reg);                          // big-endian -> little-endian
  shrl(reg, 16);                        // keep only the two index bytes
}
190 190
191 191
// Load a constant-pool-cache index from the bytecode stream at
// bcp + bcp_offset into reg. With giant_index (invokedynamic, 4-byte
// encoded secondary index) the raw value is decoded to a plain index.
void InterpreterMacroAssembler::get_cache_index_at_bcp(Register reg, int bcp_offset, bool giant_index) {
  assert(bcp_offset > 0, "bcp is still pointing to start of bytecode");
  if (!giant_index) {
    // Normal 2-byte cache index.
    load_unsigned_short(reg, Address(rsi, bcp_offset));
  } else {
    assert(EnableInvokeDynamic, "giant index used only for EnableInvokeDynamic");
    movl(reg, Address(rsi, bcp_offset));
    // Check if the secondary index definition is still ~x, otherwise
    // we have to change the following assembler code to calculate the
    // plain index.
    assert(constantPoolCacheOopDesc::decode_secondary_index(~123) == 123, "else change next line");
    notl(reg);  // convert to plain index
  }
}
203 206
204 207
// Load the constant pool cache (into 'cache') and the scaled
// ConstantPoolCacheEntry index (into 'index') for the cp-cache reference
// encoded at bcp + bcp_offset.
void InterpreterMacroAssembler::get_cache_and_index_at_bcp(Register cache, Register index,
                                                           int bcp_offset, bool giant_index) {
  assert(cache != index, "must use different registers");
  get_cache_index_at_bcp(index, bcp_offset, giant_index);
  movptr(cache, Address(rbp, frame::interpreter_frame_cache_offset * wordSize));
  assert(sizeof(ConstantPoolCacheEntry) == 4*wordSize, "adjust code below");
  shlptr(index, 2);  // convert from field index to ConstantPoolCacheEntry index
}
213 216
214 217
// Compute a direct pointer (into 'cache') to the ConstantPoolCacheEntry
// referenced at bcp + bcp_offset. 'tmp' is clobbered with the byte offset.
void InterpreterMacroAssembler::get_cache_entry_pointer_at_bcp(Register cache, Register tmp,
                                                               int bcp_offset, bool giant_index) {
  assert(cache != tmp, "must use different register");
  get_cache_index_at_bcp(tmp, bcp_offset, giant_index);
  assert(sizeof(ConstantPoolCacheEntry) == 4*wordSize, "adjust code below");
  // convert from field index to ConstantPoolCacheEntry index
  // and from word offset to byte offset
  shll(tmp, 2 + LogBytesPerWord);
  movptr(cache, Address(rbp, frame::interpreter_frame_cache_offset * wordSize));
  // skip past the header
  addptr(cache, in_bytes(constantPoolCacheOopDesc::base_offset()));
  addptr(cache, tmp);  // construct pointer to cache entry
}
228 231
229 232
// Generate a subtype check: branch to ok_is_subtype if sub_klass is
// a subtype of super_klass. EAX holds the super_klass. Blows ECX.
// Resets EDI to locals. Register sub_klass cannot be any of the above.
// On fall-through (not a subtype) the failure has been profiled.
void InterpreterMacroAssembler::gen_subtype_check( Register Rsub_klass, Label &ok_is_subtype ) {
  assert( Rsub_klass != rax, "rax, holds superklass" );
  assert( Rsub_klass != rcx, "used as a temp" );
  assert( Rsub_klass != rdi, "used as a temp, restored from locals" );

  // Profile the not-null value's klass.
  profile_typecheck(rcx, Rsub_klass, rdi); // blows rcx, reloads rdi

  // Do the check.
  check_klass_subtype(Rsub_klass, rax, rcx, ok_is_subtype); // blows rcx

  // Profile the failure of the check.
  profile_typecheck_failed(rcx); // blows rcx
}
247 250
248 251 void InterpreterMacroAssembler::f2ieee() {
249 252 if (IEEEPrecision) {
250 253 fstp_s(Address(rsp, 0));
251 254 fld_s(Address(rsp, 0));
252 255 }
253 256 }
254 257
255 258
256 259 void InterpreterMacroAssembler::d2ieee() {
257 260 if (IEEEPrecision) {
258 261 fstp_d(Address(rsp, 0));
259 262 fld_d(Address(rsp, 0));
260 263 }
261 264 }
262 265
263 266 // Java Expression Stack
264 267
265 268 #ifdef ASSERT
266 269 void InterpreterMacroAssembler::verify_stack_tag(frame::Tag t) {
267 270 if (TaggedStackInterpreter) {
268 271 Label okay;
269 272 cmpptr(Address(rsp, wordSize), (int32_t)t);
270 273 jcc(Assembler::equal, okay);
271 274 // Also compare if the stack value is zero, then the tag might
272 275 // not have been set coming from deopt.
273 276 cmpptr(Address(rsp, 0), 0);
274 277 jcc(Assembler::equal, okay);
275 278 stop("Java Expression stack tag value is bad");
276 279 bind(okay);
277 280 }
278 281 }
279 282 #endif // ASSERT
280 283
// Pop an oop from the Java expression stack into r, discarding the tag
// slot when the tagged interpreter is in use.
void InterpreterMacroAssembler::pop_ptr(Register r) {
  debug_only(verify_stack_tag(frame::TagReference));
  pop(r);
  if (TaggedStackInterpreter) addptr(rsp, 1 * wordSize);  // skip tag slot
}
286 289
// Pop a pointer and its tag from the Java expression stack.
void InterpreterMacroAssembler::pop_ptr(Register r, Register tag) {
  pop(r);
  // Tag may not be reference for jsr, can be returnAddress
  if (TaggedStackInterpreter) pop(tag);
}
292 295
// Pop an int from the Java expression stack into r, discarding the tag
// slot when the tagged interpreter is in use.
void InterpreterMacroAssembler::pop_i(Register r) {
  debug_only(verify_stack_tag(frame::TagValue));
  pop(r);
  if (TaggedStackInterpreter) addptr(rsp, 1 * wordSize);  // skip tag slot
}
298 301
// Pop a long (two 32-bit words) from the Java expression stack: low word
// first, then high word. Discards tag slots in tagged mode.
void InterpreterMacroAssembler::pop_l(Register lo, Register hi) {
  debug_only(verify_stack_tag(frame::TagValue));
  pop(lo);
  if (TaggedStackInterpreter) addptr(rsp, 1 * wordSize);  // skip tag slot
  debug_only(verify_stack_tag(frame::TagValue));
  pop(hi);
  if (TaggedStackInterpreter) addptr(rsp, 1 * wordSize);  // skip tag slot
}
307 310
// Pop a float from the Java expression stack into FPU ST(0).
void InterpreterMacroAssembler::pop_f() {
  debug_only(verify_stack_tag(frame::TagValue));
  fld_s(Address(rsp, 0));
  addptr(rsp, 1 * wordSize);                              // pop the value
  if (TaggedStackInterpreter) addptr(rsp, 1 * wordSize);  // skip tag slot
}
314 317
// Pop a double from the Java expression stack into FPU ST(0).
void InterpreterMacroAssembler::pop_d() {
  // Write double to stack contiguously and load into ST0
  pop_dtos_to_rsp();
  fld_d(Address(rsp, 0));
  addptr(rsp, 2 * wordSize);
}
321 324
322 325
323 326 // Pop the top of the java expression stack to execution stack (which
324 327 // happens to be the same place).
325 328 void InterpreterMacroAssembler::pop_dtos_to_rsp() {
326 329 if (TaggedStackInterpreter) {
327 330 // Pop double value into scratch registers
328 331 debug_only(verify_stack_tag(frame::TagValue));
329 332 pop(rax);
330 333 addptr(rsp, 1* wordSize);
331 334 debug_only(verify_stack_tag(frame::TagValue));
332 335 pop(rdx);
333 336 addptr(rsp, 1* wordSize);
334 337 push(rdx);
335 338 push(rax);
336 339 }
337 340 }
338 341
339 342 void InterpreterMacroAssembler::pop_ftos_to_rsp() {
340 343 if (TaggedStackInterpreter) {
341 344 debug_only(verify_stack_tag(frame::TagValue));
342 345 pop(rax);
343 346 addptr(rsp, 1 * wordSize);
344 347 push(rax); // ftos is at rsp
345 348 }
346 349 }
347 350
// Pop the TOS value for 'state' from the Java expression stack into the
// canonical TOS register(s) (rax, rax:rdx, or FPU ST(0)).
void InterpreterMacroAssembler::pop(TosState state) {
  switch (state) {
    case atos: pop_ptr(rax);                                 break;
    case btos:                                               // fall through
    case ctos:                                               // fall through
    case stos:                                               // fall through
    case itos: pop_i(rax);                                   break;
    case ltos: pop_l(rax, rdx);                              break;
    case ftos: pop_f();                                      break;
    case dtos: pop_d();                                      break;
    case vtos: /* nothing to do */                           break;
    default  : ShouldNotReachHere();
  }
  verify_oop(rax, state);  // only checks anything for atos
}
363 366
// Push an oop onto the Java expression stack (tag first in tagged mode).
void InterpreterMacroAssembler::push_ptr(Register r) {
  if (TaggedStackInterpreter) push(frame::TagReference);
  push(r);
}
368 371
// Push a pointer with an explicit tag (e.g. returnAddress for jsr).
void InterpreterMacroAssembler::push_ptr(Register r, Register tag) {
  if (TaggedStackInterpreter) push(tag);  // tag first
  push(r);
}
373 376
// Push an int onto the Java expression stack (tag first in tagged mode).
void InterpreterMacroAssembler::push_i(Register r) {
  if (TaggedStackInterpreter) push(frame::TagValue);
  push(r);
}
378 381
// Push a long: high word first so that the low word ends up at the top of
// the Java expression stack, each with its own tag in tagged mode.
void InterpreterMacroAssembler::push_l(Register lo, Register hi) {
  if (TaggedStackInterpreter) push(frame::TagValue);
  push(hi);
  if (TaggedStackInterpreter) push(frame::TagValue);
  push(lo);
}
385 388
// Push FPU ST(0) as a float onto the Java expression stack.
void InterpreterMacroAssembler::push_f() {
  if (TaggedStackInterpreter) push(frame::TagValue);
  // Do not schedule for no AGI! Never write beyond rsp!
  subptr(rsp, 1 * wordSize);
  fstp_s(Address(rsp, 0));
}
392 395
// Push FPU ST(0) as a double onto the Java expression stack. In tagged
// mode the two value words are interleaved with tag words; r is used as a
// scratch register to shuffle the high word into place.
void InterpreterMacroAssembler::push_d(Register r) {
  if (TaggedStackInterpreter) {
    // Double values are stored as:
    //   tag
    //   high
    //   tag
    //   low
    push(frame::TagValue);           // tag for the high word
    subptr(rsp, 3 * wordSize);       // room for low, tag, high
    fstp_d(Address(rsp, 0));         // store contiguous double at rsp
    // move high word up to slot n-1
    movl(r, Address(rsp, 1*wordSize));
    movl(Address(rsp, 2*wordSize), r);
    // move tag
    movl(Address(rsp, 1*wordSize), frame::TagValue);
  } else {
    // Do not schedule for no AGI! Never write beyond rsp!
    subptr(rsp, 2 * wordSize);
    fstp_d(Address(rsp, 0));
  }
}
414 417
415 418
// Push the canonical TOS register(s) for 'state' onto the Java
// expression stack.
void InterpreterMacroAssembler::push(TosState state) {
  verify_oop(rax, state);  // only checks anything for atos
  switch (state) {
    case atos: push_ptr(rax); break;
    case btos:                                               // fall through
    case ctos:                                               // fall through
    case stos:                                               // fall through
    case itos: push_i(rax);                                  break;
    case ltos: push_l(rax, rdx);                             break;
    case ftos: push_f();                                     break;
    case dtos: push_d(rax);                                  break;
    case vtos: /* nothing to do */                           break;
    default  : ShouldNotReachHere();
  }
}
431 434
432 435
// Tagged stack helpers for swap and dup
// Load the n-th expression-stack value (and its tag in tagged mode)
// without popping.
void InterpreterMacroAssembler::load_ptr_and_tag(int n, Register val,
                                                 Register tag) {
  movptr(val, Address(rsp, Interpreter::expr_offset_in_bytes(n)));
  if (TaggedStackInterpreter) {
    movptr(tag, Address(rsp, Interpreter::expr_tag_offset_in_bytes(n)));
  }
}
441 444
// Store val (and its tag in tagged mode) into the n-th expression-stack
// slot in place.
void InterpreterMacroAssembler::store_ptr_and_tag(int n, Register val,
                                                  Register tag) {
  movptr(Address(rsp, Interpreter::expr_offset_in_bytes(n)), val);
  if (TaggedStackInterpreter) {
    movptr(Address(rsp, Interpreter::expr_tag_offset_in_bytes(n)), tag);
  }
}
449 452
450 453
// Tagged local support
// Write the tag for local slot n (compile-time index). TagCategory2
// (long/double) occupies two slots, both tagged TagValue.
void InterpreterMacroAssembler::tag_local(frame::Tag tag, int n) {
  if (TaggedStackInterpreter) {
    if (tag == frame::TagCategory2) {
      movptr(Address(rdi, Interpreter::local_tag_offset_in_bytes(n+1)), (int32_t)frame::TagValue);
      movptr(Address(rdi, Interpreter::local_tag_offset_in_bytes(n)), (int32_t)frame::TagValue);
    } else {
      movptr(Address(rdi, Interpreter::local_tag_offset_in_bytes(n)), (int32_t)tag);
    }
  }
}
462 465
// Write the tag for a local slot whose index is in register idx.
// TagCategory2 (long/double) occupies two slots, both tagged TagValue.
void InterpreterMacroAssembler::tag_local(frame::Tag tag, Register idx) {
  if (TaggedStackInterpreter) {
    if (tag == frame::TagCategory2) {
      movptr(Address(rdi, idx, Interpreter::stackElementScale(),
                  Interpreter::local_tag_offset_in_bytes(1)), (int32_t)frame::TagValue);
      movptr(Address(rdi, idx, Interpreter::stackElementScale(),
                  Interpreter::local_tag_offset_in_bytes(0)), (int32_t)frame::TagValue);
    } else {
      movptr(Address(rdi, idx, Interpreter::stackElementScale(),
                  Interpreter::local_tag_offset_in_bytes(0)), (int32_t)tag);
    }
  }
}
476 479
// Write a tag held in a register to the local slot whose index is in idx.
void InterpreterMacroAssembler::tag_local(Register tag, Register idx) {
  if (TaggedStackInterpreter) {
    // can only be TagValue or TagReference
    movptr(Address(rdi, idx, Interpreter::stackElementScale(),
                Interpreter::local_tag_offset_in_bytes(0)), tag);
  }
}
484 487
485 488
// Write a tag held in a register to local slot n (compile-time index).
void InterpreterMacroAssembler::tag_local(Register tag, int n) {
  if (TaggedStackInterpreter) {
    // can only be TagValue or TagReference
    movptr(Address(rdi, Interpreter::local_tag_offset_in_bytes(n)), tag);
  }
}
492 495
493 496 #ifdef ASSERT
// Debug check: verify the tag of local slot n matches 'tag'. For
// TagCategory2 both slots are checked against TagValue. A zero local
// value is accepted since deopt may not have set the tag.
void InterpreterMacroAssembler::verify_local_tag(frame::Tag tag, int n) {
  if (TaggedStackInterpreter) {
    frame::Tag t = tag;
    if (tag == frame::TagCategory2) {
      Label nbl;
      t = frame::TagValue;  // change to what is stored in locals
      // Check the second (high) slot of the long/double pair first.
      cmpptr(Address(rdi, Interpreter::local_tag_offset_in_bytes(n+1)), (int32_t)t);
      jcc(Assembler::equal, nbl);
      stop("Local tag is bad for long/double");
      bind(nbl);
    }
    Label notBad;
    cmpptr(Address(rdi, Interpreter::local_tag_offset_in_bytes(n)), (int32_t)t);
    jcc(Assembler::equal, notBad);
    // Also compare if the local value is zero, then the tag might
    // not have been set coming from deopt.
    cmpptr(Address(rdi, Interpreter::local_offset_in_bytes(n)), 0);
    jcc(Assembler::equal, notBad);
    stop("Local tag is bad");
    bind(notBad);
  }
}
516 519
// Debug check: verify the tag of the local slot whose index is in idx.
// Same rules as the int-index overload above.
void InterpreterMacroAssembler::verify_local_tag(frame::Tag tag, Register idx) {
  if (TaggedStackInterpreter) {
    frame::Tag t = tag;
    if (tag == frame::TagCategory2) {
      Label nbl;
      t = frame::TagValue;  // change to what is stored in locals
      cmpptr(Address(rdi, idx, Interpreter::stackElementScale(),
                  Interpreter::local_tag_offset_in_bytes(1)), (int32_t)t);
      jcc(Assembler::equal, nbl);
      stop("Local tag is bad for long/double");
      bind(nbl);
    }
    Label notBad;
    // NOTE(review): this uses cmpl where the int-index overload uses
    // cmpptr — identical on 32-bit x86, but worth confirming intent.
    cmpl(Address(rdi, idx, Interpreter::stackElementScale(),
                Interpreter::local_tag_offset_in_bytes(0)), (int32_t)t);
    jcc(Assembler::equal, notBad);
    // Also compare if the local value is zero, then the tag might
    // not have been set coming from deopt.
    cmpptr(Address(rdi, idx, Interpreter::stackElementScale(),
                Interpreter::local_offset_in_bytes(0)), 0);
    jcc(Assembler::equal, notBad);
    stop("Local tag is bad");
    bind(notBad);

  }
}
543 546 #endif // ASSERT
544 547
// Leaf VM call with no arguments, bypassing the interpreter-specific
// last_sp assertion in call_VM_leaf_base.
void InterpreterMacroAssembler::super_call_VM_leaf(address entry_point) {
  MacroAssembler::call_VM_leaf_base(entry_point, 0);
}
548 551
549 552
// Leaf VM call with one stack argument (see zero-arg overload above).
void InterpreterMacroAssembler::super_call_VM_leaf(address entry_point, Register arg_1) {
  push(arg_1);
  MacroAssembler::call_VM_leaf_base(entry_point, 1);
}
554 557
555 558
// Leaf VM call with two stack arguments, pushed right-to-left so arg_1
// ends up on top.
void InterpreterMacroAssembler::super_call_VM_leaf(address entry_point, Register arg_1, Register arg_2) {
  push(arg_2);
  push(arg_1);
  MacroAssembler::call_VM_leaf_base(entry_point, 2);
}
561 564
562 565
// Leaf VM call with three stack arguments, pushed right-to-left.
void InterpreterMacroAssembler::super_call_VM_leaf(address entry_point, Register arg_1, Register arg_2, Register arg_3) {
  push(arg_3);
  push(arg_2);
  push(arg_1);
  MacroAssembler::call_VM_leaf_base(entry_point, 3);
}
569 572
570 573
// Set up sender sp (rsi) and record last_sp in the frame before jumping
// out of interpreted code into a callee.
void InterpreterMacroAssembler::prepare_to_jump_from_interpreted() {
  // set sender sp
  lea(rsi, Address(rsp, wordSize));
  // record last_sp
  movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), rsi);
}
577 580
578 581
// Jump to from_interpreted entry of a call unless single stepping is possible
// in this thread in which case we must call the i2i entry
void InterpreterMacroAssembler::jump_from_interpreted(Register method, Register temp) {
  prepare_to_jump_from_interpreted();

  if (JvmtiExport::can_post_interpreter_events()) {
    Label run_compiled_code;
    // JVMTI events, such as single-stepping, are implemented partly by avoiding running
    // compiled code in threads for which the event is enabled. Check here for
    // interp_only_mode if these events CAN be enabled.
    get_thread(temp);
    // interp_only is an int, on little endian it is sufficient to test the byte only
    // Is a cmpl faster?
    cmpb(Address(temp, JavaThread::interp_only_mode_offset()), 0);
    jcc(Assembler::zero, run_compiled_code);
    // interp_only_mode set: stay in the interpreter via the i2i entry.
    jmp(Address(method, methodOopDesc::interpreter_entry_offset()));
    bind(run_compiled_code);
  }

  jmp(Address(method, methodOopDesc::from_interpreted_offset()));

}
601 604
602 605
// The following two routines provide a hook so that an implementation
// can schedule the dispatch in two parts. Intel does not do this.
void InterpreterMacroAssembler::dispatch_prolog(TosState state, int step) {
  // Nothing Intel-specific to be done here.
}
608 611
// Second half of the two-part dispatch hook; on Intel it simply performs
// the whole dispatch.
void InterpreterMacroAssembler::dispatch_epilog(TosState state, int step) {
  dispatch_next(state, step);
}
612 615
// Common tail of all dispatches: optional frame-size and oop checks, then
// an indirect jump through the given dispatch table indexed by the
// bytecode already loaded into rbx.
void InterpreterMacroAssembler::dispatch_base(TosState state, address* table,
                                              bool verifyoop) {
  verify_FPU(1, state);
  if (VerifyActivationFrameSize) {
    Label L;
    // Check that the frame (rbp - rsp) is at least the minimal frame size.
    mov(rcx, rbp);
    subptr(rcx, rsp);
    int min_frame_size = (frame::link_offset - frame::interpreter_frame_initial_sp_offset) * wordSize;
    cmpptr(rcx, min_frame_size);
    jcc(Assembler::greaterEqual, L);
    stop("broken stack frame");
    bind(L);
  }
  if (verifyoop) verify_oop(rax, state);
  // Jump through table[rbx], where rbx holds the next bytecode.
  Address index(noreg, rbx, Address::times_ptr);
  ExternalAddress tbl((address)table);
  ArrayAddress dispatch(tbl, index);
  jump(dispatch);
}
632 635
633 636
// Dispatch through the active (possibly safepoint) table for 'state'.
void InterpreterMacroAssembler::dispatch_only(TosState state) {
  dispatch_base(state, Interpreter::dispatch_table(state));
}
637 640
638 641
// Dispatch through the normal (non-safepoint) table for 'state'.
void InterpreterMacroAssembler::dispatch_only_normal(TosState state) {
  dispatch_base(state, Interpreter::normal_table(state));
}
642 645
// Like dispatch_only_normal but skips the TOS oop verification.
void InterpreterMacroAssembler::dispatch_only_noverify(TosState state) {
  dispatch_base(state, Interpreter::normal_table(state), false);
}
646 649
647 650
// Advance bcp by 'step' and dispatch the bytecode found there.
void InterpreterMacroAssembler::dispatch_next(TosState state, int step) {
  // load next bytecode (load before advancing rsi to prevent AGI)
  load_unsigned_byte(rbx, Address(rsi, step));
  // advance rsi
  increment(rsi, step);
  dispatch_base(state, Interpreter::dispatch_table(state));
}
655 658
656 659
// Re-dispatch the current bytecode (bcp unchanged) through the supplied
// table.
void InterpreterMacroAssembler::dispatch_via(TosState state, address* table) {
  // load current bytecode
  load_unsigned_byte(rbx, Address(rsi, 0));
  dispatch_base(state, table);
}
662 665
// remove activation
//
// Unlock the receiver if this is a synchronized method.
// Unlock any Java monitors from synchronized blocks.
// Remove the activation from the stack.
//
// If there are locked Java monitors
//    If throw_monitor_exception
//       throws IllegalMonitorStateException
//    Else if install_monitor_exception
//       installs IllegalMonitorStateException
//    Else
//       no error processing
void InterpreterMacroAssembler::remove_activation(TosState state, Register ret_addr,
                                                  bool throw_monitor_exception,
                                                  bool install_monitor_exception,
                                                  bool notify_jvmdi) {
  // Note: Registers rax, rdx and FPU ST(0) may be in use for the result
  // check if synchronized method
  Label unlocked, unlock, no_unlock;

  get_thread(rcx);
  const Address do_not_unlock_if_synchronized(rcx,
    in_bytes(JavaThread::do_not_unlock_if_synchronized_offset()));

  // Stash the flag in rdi (surviving the loads below) and reset it.
  movbool(rbx, do_not_unlock_if_synchronized);
  mov(rdi,rbx);
  movbool(do_not_unlock_if_synchronized, false); // reset the flag

  movptr(rbx, Address(rbp, frame::interpreter_frame_method_offset * wordSize)); // get method access flags
  movl(rcx, Address(rbx, methodOopDesc::access_flags_offset()));

  testl(rcx, JVM_ACC_SYNCHRONIZED);
  jcc(Assembler::zero, unlocked);

  // Don't unlock anything if the _do_not_unlock_if_synchronized flag
  // is set.
  mov(rcx,rdi);
  testbool(rcx);
  jcc(Assembler::notZero, no_unlock);

  // unlock monitor
  push(state); // save result

  // BasicObjectLock will be first in list, since this is a synchronized method. However, need
  // to check that the object has not been unlocked by an explicit monitorexit bytecode.
  const Address monitor(rbp, frame::interpreter_frame_initial_sp_offset * wordSize - (int)sizeof(BasicObjectLock));
  lea   (rdx, monitor);                          // address of first monitor

  movptr (rax, Address(rdx, BasicObjectLock::obj_offset_in_bytes()));
  testptr(rax, rax);
  jcc    (Assembler::notZero, unlock);

  pop(state);
  if (throw_monitor_exception) {
    empty_FPU_stack();  // remove possible return value from FPU-stack, otherwise stack could overflow

    // Entry already unlocked, need to throw exception
    call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_illegal_monitor_state_exception));
    should_not_reach_here();
  } else {
    // Monitor already unlocked during a stack unroll.
    // If requested, install an illegal_monitor_state_exception.
    // Continue with stack unrolling.
    if (install_monitor_exception) {
      empty_FPU_stack();  // remove possible return value from FPU-stack, otherwise stack could overflow
      call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::new_illegal_monitor_state_exception));
    }
    jmp(unlocked);
  }

  bind(unlock);
  unlock_object(rdx);
  pop(state);

  // Check that for block-structured locking (i.e., that all locked objects has been unlocked)
  bind(unlocked);

  // rax, rdx: Might contain return value

  // Check that all monitors are unlocked
  {
    Label loop, exception, entry, restart;
    const int entry_size = frame::interpreter_frame_monitor_size() * wordSize;
    const Address monitor_block_top(rbp, frame::interpreter_frame_monitor_block_top_offset * wordSize);
    const Address monitor_block_bot(rbp, frame::interpreter_frame_initial_sp_offset * wordSize);

    bind(restart);
    movptr(rcx, monitor_block_top);           // points to current entry, starting with top-most entry
    lea(rbx, monitor_block_bot);              // points to word before bottom of monitor block
    jmp(entry);

    // Entry already locked, need to throw exception
    bind(exception);

    if (throw_monitor_exception) {
      empty_FPU_stack();  // remove possible return value from FPU-stack, otherwise stack could overflow

      // Throw exception
      call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_illegal_monitor_state_exception));
      should_not_reach_here();
    } else {
      // Stack unrolling. Unlock object and install illegal_monitor_exception
      // Unlock does not block, so don't have to worry about the frame

      push(state);
      mov(rdx, rcx);
      unlock_object(rdx);
      pop(state);

      if (install_monitor_exception) {
        empty_FPU_stack();  // remove possible return value from FPU-stack, otherwise stack could overflow
        call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::new_illegal_monitor_state_exception));
      }

      jmp(restart);
    }

    bind(loop);
    cmpptr(Address(rcx, BasicObjectLock::obj_offset_in_bytes()), (int32_t)NULL_WORD);  // check if current entry is used
    jcc(Assembler::notEqual, exception);

    addptr(rcx, entry_size); // otherwise advance to next entry
    bind(entry);
    cmpptr(rcx, rbx); // check if bottom reached
    jcc(Assembler::notEqual, loop); // if not at bottom then check this entry
  }

  bind(no_unlock);

  // jvmti support
  if (notify_jvmdi) {
    notify_method_exit(state, NotifyJVMTI);     // preserve TOSCA
  } else {
    notify_method_exit(state, SkipNotifyJVMTI); // preserve TOSCA
  }

  // remove activation
  movptr(rbx, Address(rbp, frame::interpreter_frame_sender_sp_offset * wordSize)); // get sender sp
  leave();                                      // remove frame anchor
  pop(ret_addr);                                // get return address
  mov(rsp, rbx);                                // set sp to sender sp
  if (UseSSE) {
    // float and double are returned in xmm register in SSE-mode
    if (state == ftos && UseSSE >= 1) {
      subptr(rsp, wordSize);
      fstp_s(Address(rsp, 0));
      movflt(xmm0, Address(rsp, 0));
      addptr(rsp, wordSize);
    } else if (state == dtos && UseSSE >= 2) {
      subptr(rsp, 2*wordSize);
      fstp_d(Address(rsp, 0));
      movdbl(xmm0, Address(rsp, 0));
      addptr(rsp, 2*wordSize);
    }
  }
}
820 823
821 824 #endif /* !CC_INTERP */
822 825
823 826
824 827 // Lock object
825 828 //
826 829 // Argument: rdx : Points to BasicObjectLock to be used for locking. Must
827 830 // be initialized with object to lock
void InterpreterMacroAssembler::lock_object(Register lock_reg) {
  assert(lock_reg == rdx, "The argument is only for looks. It must be rdx");

  if (UseHeavyMonitors) {
    // Fast path disabled: always go to the runtime for locking.
    call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorenter), lock_reg);
  } else {

    Label done;

    const Register swap_reg = rax; // Must use rax, for cmpxchg instruction
    const Register obj_reg = rcx; // Will contain the oop

    const int obj_offset = BasicObjectLock::obj_offset_in_bytes();
    const int lock_offset = BasicObjectLock::lock_offset_in_bytes ();
    const int mark_offset = lock_offset + BasicLock::displaced_header_offset_in_bytes();

    Label slow_case;

    // Load object pointer into obj_reg %rcx
    movptr(obj_reg, Address(lock_reg, obj_offset));

    if (UseBiasedLocking) {
      // Note: we use noreg for the temporary register since it's hard
      // to come up with a free register on all incoming code paths
      biased_locking_enter(lock_reg, obj_reg, swap_reg, noreg, false, done, &slow_case);
    }

    // Load immediate 1 into swap_reg %rax,
    movptr(swap_reg, (int32_t)1);

    // Load (object->mark() | 1) into swap_reg %rax,
    // i.e. the mark word with its "unlocked" bit forced on.
    orptr(swap_reg, Address(obj_reg, 0));

    // Save (object->mark() | 1) into BasicLock's displaced header
    movptr(Address(lock_reg, mark_offset), swap_reg);

    assert(lock_offset == 0, "displached header must be first word in BasicObjectLock");
    if (os::is_MP()) {
      lock();
    }
    // Try to install the BasicObjectLock* as the new mark word; succeeds
    // (ZF set) only if the mark still holds the unlocked value in %rax.
    cmpxchgptr(lock_reg, Address(obj_reg, 0));
    if (PrintBiasedLockingStatistics) {
      cond_inc32(Assembler::zero,
                 ExternalAddress((address) BiasedLocking::fast_path_entry_count_addr()));
    }
    jcc(Assembler::zero, done);

    // Test if the oopMark is an obvious stack pointer, i.e.,
    //  1) (mark & 3) == 0, and
    //  2) rsp <= mark < mark + os::pagesize()
    //
    // These 3 tests can be done by evaluating the following
    // expression: ((mark - rsp) & (3 - os::vm_page_size())),
    // assuming both stack pointer and pagesize have their
    // least significant 2 bits clear.
    // NOTE: the oopMark is in swap_reg %rax, as the result of cmpxchg
    subptr(swap_reg, rsp);
    andptr(swap_reg, 3 - os::vm_page_size());

    // Save the test result, for recursive case, the result is zero
    movptr(Address(lock_reg, mark_offset), swap_reg);

    if (PrintBiasedLockingStatistics) {
      cond_inc32(Assembler::zero,
                 ExternalAddress((address) BiasedLocking::fast_path_entry_count_addr()));
    }
    // Zero means the recursive-lock test above passed; we are done.
    jcc(Assembler::zero, done);

    bind(slow_case);

    // Call the runtime routine for slow case
    call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorenter), lock_reg);

    bind(done);
  }
}
904 907
905 908
906 909 // Unlocks an object. Used in monitorexit bytecode and remove_activation.
907 910 //
908 911 // Argument: rdx : Points to BasicObjectLock structure for lock
909 912 // Throw an IllegalMonitorException if object is not locked by current thread
910 913 //
911 914 // Uses: rax, rbx, rcx, rdx
void InterpreterMacroAssembler::unlock_object(Register lock_reg) {
  assert(lock_reg == rdx, "The argument is only for looks. It must be rdx");

  if (UseHeavyMonitors) {
    // Fast path disabled: always go to the runtime for unlocking.
    call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorexit), lock_reg);
  } else {
    Label done;

    const Register swap_reg = rax; // Must use rax, for cmpxchg instruction
    const Register header_reg = rbx; // Will contain the old oopMark
    const Register obj_reg = rcx; // Will contain the oop

    save_bcp(); // Save in case of exception

    // Convert from BasicObjectLock structure to object and BasicLock structure
    // Store the BasicLock address into %rax,
    lea(swap_reg, Address(lock_reg, BasicObjectLock::lock_offset_in_bytes()));

    // Load oop into obj_reg(%rcx)
    movptr(obj_reg, Address(lock_reg, BasicObjectLock::obj_offset_in_bytes ()));

    // Free entry (cleared obj field marks the monitor slot as unused)
    movptr(Address(lock_reg, BasicObjectLock::obj_offset_in_bytes()), NULL_WORD);

    if (UseBiasedLocking) {
      biased_locking_exit(obj_reg, header_reg, done);
    }

    // Load the old header from BasicLock structure
    movptr(header_reg, Address(swap_reg, BasicLock::displaced_header_offset_in_bytes()));

    // Test for recursion
    testptr(header_reg, header_reg);

    // zero for recursive case: nothing to restore in the mark word
    jcc(Assembler::zero, done);

    // Atomic swap back the old header; succeeds (ZF set) only if the mark
    // still points at this BasicLock (the value left in %rax by the lea above)
    if (os::is_MP()) lock();
    cmpxchgptr(header_reg, Address(obj_reg, 0));

    // zero for recursive case
    jcc(Assembler::zero, done);

    // Call the runtime routine for slow case.
    movptr(Address(lock_reg, BasicObjectLock::obj_offset_in_bytes()), obj_reg); // restore obj
    call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorexit), lock_reg);

    bind(done);

    restore_bcp();
  }
}
965 968
966 969
967 970 #ifndef CC_INTERP
968 971
// Test the method data pointer. If it is null, continue at the specified label
void InterpreterMacroAssembler::test_method_data_pointer(Register mdp, Label& zero_continue) {
  assert(ProfileInterpreter, "must be profiling interpreter");
  // Load the saved mdp from the interpreter frame; if there is no profile
  // data it is zero and we branch to zero_continue.
  movptr(mdp, Address(rbp, frame::interpreter_frame_mdx_offset * wordSize));
  testptr(mdp, mdp);
  jcc(Assembler::zero, zero_continue);
}
976 979
977 980
978 981 // Set the method data pointer for the current bcp.
// Set the method data pointer for the current bcp.
// Recomputes the frame's mdp slot as: MDO data area + di(bcp),
// leaving it untouched (and skipping the runtime call) if the
// method has no methodDataOop.
void InterpreterMacroAssembler::set_method_data_pointer_for_bcp() {
  assert(ProfileInterpreter, "must be profiling interpreter");
  Label zero_continue;
  // rax and rbx are clobbered below; preserve them for the caller.
  push(rax);
  push(rbx);

  get_method(rbx);
  // Test MDO to avoid the call if it is NULL.
  movptr(rax, Address(rbx, in_bytes(methodOopDesc::method_data_offset())));
  testptr(rax, rax);
  jcc(Assembler::zero, zero_continue);

  // rbx,: method
  // rsi: bcp
  call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::bcp_to_di), rbx, rsi);
  // rax,: mdi

  // Re-load the MDO (the leaf call may have clobbered rbx) and re-check
  // for NULL before forming mdp = MDO + data_offset + mdi.
  movptr(rbx, Address(rbx, in_bytes(methodOopDesc::method_data_offset())));
  testptr(rbx, rbx);
  jcc(Assembler::zero, zero_continue);
  addptr(rbx, in_bytes(methodDataOopDesc::data_offset()));
  addptr(rbx, rax);
  movptr(Address(rbp, frame::interpreter_frame_mdx_offset * wordSize), rbx);

  bind(zero_continue);
  pop(rbx);
  pop(rax);
}
1007 1010
// Debug-only sanity check (ASSERT builds): verify that the current mdp is
// consistent with the current bcp, calling into the runtime to report a
// mismatch. A no-op in product builds.
void InterpreterMacroAssembler::verify_method_data_pointer() {
  assert(ProfileInterpreter, "must be profiling interpreter");
#ifdef ASSERT
  Label verify_continue;
  // Preserve all registers we use for the check.
  push(rax);
  push(rbx);
  push(rcx);
  push(rdx);
  test_method_data_pointer(rcx, verify_continue); // If mdp is zero, continue
  get_method(rbx);

  // If the mdp is valid, it will point to a DataLayout header which is
  // consistent with the bcp. The converse is highly probable also.
  // Reconstruct the bcp from the bci stored in the profile entry and
  // compare against the actual bcp in rsi.
  load_unsigned_short(rdx, Address(rcx, in_bytes(DataLayout::bci_offset())));
  addptr(rdx, Address(rbx, methodOopDesc::const_offset()));
  lea(rdx, Address(rdx, constMethodOopDesc::codes_offset()));
  cmpptr(rdx, rsi);
  jcc(Assembler::equal, verify_continue);
  // rbx,: method
  // rsi: bcp
  // rcx: mdp
  call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::verify_mdp), rbx, rsi, rcx);
  bind(verify_continue);
  pop(rdx);
  pop(rcx);
  pop(rbx);
  pop(rax);
#endif // ASSERT
}
1037 1040
1038 1041
// Store 'value' into the profile-data cell at mdp_in + constant.
void InterpreterMacroAssembler::set_mdp_data_at(Register mdp_in, int constant, Register value) {
  // %%% this seems to be used to store counter data which is surely 32bits
  // however 64bit side stores 64 bits which seems wrong
  assert(ProfileInterpreter, "must be profiling interpreter");
  Address data(mdp_in, constant);
  movptr(data, value);
}
1046 1049
1047 1050
1048 1051 void InterpreterMacroAssembler::increment_mdp_data_at(Register mdp_in,
1049 1052 int constant,
1050 1053 bool decrement) {
1051 1054 // Counter address
1052 1055 Address data(mdp_in, constant);
1053 1056
1054 1057 increment_mdp_data_at(data, decrement);
1055 1058 }
1056 1059
1057 1060
// Saturating update of a 32-bit profiling counter in the MDO.
// Increment pulls back by one on overflow; decrement stops at the
// negative boundary. Uses a branch-free add/sbb idiom for the
// increment case.
void InterpreterMacroAssembler::increment_mdp_data_at(Address data,
                                                      bool decrement) {

  assert( DataLayout::counter_increment==1, "flow-free idiom only works with 1" );
  assert(ProfileInterpreter, "must be profiling interpreter");

  // %%% 64bit treats this as 64 bit which seems unlikely
  if (decrement) {
    // Decrement the register. Set condition codes.
    addl(data, -DataLayout::counter_increment);
    // If the decrement causes the counter to overflow, stay negative
    Label L;
    jcc(Assembler::negative, L);
    addl(data, DataLayout::counter_increment);
    bind(L);
  } else {
    assert(DataLayout::counter_increment == 1,
           "flow-free idiom only works with 1");
    // Increment the register. Set carry flag.
    addl(data, DataLayout::counter_increment);
    // If the increment causes the counter to overflow, pull back by 1.
    sbbl(data, 0);
  }
}
1082 1085
1083 1086
1084 1087 void InterpreterMacroAssembler::increment_mdp_data_at(Register mdp_in,
1085 1088 Register reg,
1086 1089 int constant,
1087 1090 bool decrement) {
1088 1091 Address data(mdp_in, reg, Address::times_1, constant);
1089 1092
1090 1093 increment_mdp_data_at(data, decrement);
1091 1094 }
1092 1095
1093 1096
1094 1097 void InterpreterMacroAssembler::set_mdp_flag_at(Register mdp_in, int flag_byte_constant) {
1095 1098 assert(ProfileInterpreter, "must be profiling interpreter");
1096 1099 int header_offset = in_bytes(DataLayout::header_offset());
1097 1100 int header_bits = DataLayout::flag_mask_to_header_mask(flag_byte_constant);
1098 1101 // Set the flag
1099 1102 orl(Address(mdp_in, header_offset), header_bits);
1100 1103 }
1101 1104
1102 1105
1103 1106
// Compare 'value' with the profile-data cell at mdp_in + offset and branch
// to not_equal_continue on mismatch. If test_value_out is a real register,
// the cell's contents are left in it for the caller's use.
void InterpreterMacroAssembler::test_mdp_data_at(Register mdp_in,
                                                 int offset,
                                                 Register value,
                                                 Register test_value_out,
                                                 Label& not_equal_continue) {
  assert(ProfileInterpreter, "must be profiling interpreter");
  if (test_value_out == noreg) {
    cmpptr(value, Address(mdp_in, offset));
  } else {
    // Put the test value into a register, so caller can use it:
    movptr(test_value_out, Address(mdp_in, offset));
    cmpptr(test_value_out, value);
  }
  jcc(Assembler::notEqual, not_equal_continue);
}
1119 1122
1120 1123
1121 1124 void InterpreterMacroAssembler::update_mdp_by_offset(Register mdp_in, int offset_of_disp) {
1122 1125 assert(ProfileInterpreter, "must be profiling interpreter");
1123 1126 Address disp_address(mdp_in, offset_of_disp);
1124 1127 addptr(mdp_in,disp_address);
1125 1128 movptr(Address(rbp, frame::interpreter_frame_mdx_offset * wordSize), mdp_in);
1126 1129 }
1127 1130
1128 1131
1129 1132 void InterpreterMacroAssembler::update_mdp_by_offset(Register mdp_in, Register reg, int offset_of_disp) {
1130 1133 assert(ProfileInterpreter, "must be profiling interpreter");
1131 1134 Address disp_address(mdp_in, reg, Address::times_1, offset_of_disp);
1132 1135 addptr(mdp_in, disp_address);
1133 1136 movptr(Address(rbp, frame::interpreter_frame_mdx_offset * wordSize), mdp_in);
1134 1137 }
1135 1138
1136 1139
// Advance mdp by a compile-time-known byte amount and write the new mdp
// back into the interpreter frame.
void InterpreterMacroAssembler::update_mdp_by_constant(Register mdp_in, int constant) {
  assert(ProfileInterpreter, "must be profiling interpreter");
  addptr(mdp_in, constant);
  movptr(Address(rbp, frame::interpreter_frame_mdx_offset * wordSize), mdp_in);
}
1142 1145
1143 1146
// Let the runtime recompute the mdp for a ret bytecode whose bci did not
// match any cached RetData row.
void InterpreterMacroAssembler::update_mdp_for_ret(Register return_bci) {
  assert(ProfileInterpreter, "must be profiling interpreter");
  push(return_bci); // save/restore across call_VM
  call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::update_mdp_for_ret), return_bci);
  pop(return_bci);
}
1150 1153
1151 1154
// Profile a taken branch: bump the JumpData taken count (leaving the new
// value in bumped_count for the caller) and follow the recorded
// displacement to the target's profile data.
void InterpreterMacroAssembler::profile_taken_branch(Register mdp, Register bumped_count) {
  if (ProfileInterpreter) {
    Label profile_continue;

    // If no method data exists, go to profile_continue.
    // Otherwise, assign to mdp
    test_method_data_pointer(mdp, profile_continue);

    // We are taking a branch. Increment the taken count.
    // We inline increment_mdp_data_at to return bumped_count in a register
    //increment_mdp_data_at(mdp, in_bytes(JumpData::taken_offset()));
    Address data(mdp, in_bytes(JumpData::taken_offset()));

    // %%% 64bit treats these cells as 64 bit but they seem to be 32 bit
    movl(bumped_count,data);
    assert( DataLayout::counter_increment==1, "flow-free idiom only works with 1" );
    // add sets carry on overflow; sbb then pulls the count back by 1 so
    // the counter saturates instead of wrapping.
    addl(bumped_count, DataLayout::counter_increment);
    sbbl(bumped_count, 0);
    movl(data,bumped_count); // Store back out

    // The method data pointer needs to be updated to reflect the new target.
    update_mdp_by_offset(mdp, in_bytes(JumpData::displacement_offset()));
    bind (profile_continue);
  }
}
1177 1180
1178 1181
// Profile a not-taken branch: bump the BranchData not-taken count and
// advance mdp past this entry.
void InterpreterMacroAssembler::profile_not_taken_branch(Register mdp) {
  if (ProfileInterpreter) {
    Label profile_continue;

    // If no method data exists, go to profile_continue.
    test_method_data_pointer(mdp, profile_continue);

    // We are taking a branch. Increment the not taken count.
    increment_mdp_data_at(mdp, in_bytes(BranchData::not_taken_offset()));

    // The method data pointer needs to be updated to correspond to the next bytecode
    update_mdp_by_constant(mdp, in_bytes(BranchData::branch_data_size()));
    bind (profile_continue);
  }
}
1194 1197
1195 1198
// Profile a non-virtual call site: bump the CounterData count and advance
// mdp past this entry.
void InterpreterMacroAssembler::profile_call(Register mdp) {
  if (ProfileInterpreter) {
    Label profile_continue;

    // If no method data exists, go to profile_continue.
    test_method_data_pointer(mdp, profile_continue);

    // We are making a call. Increment the count.
    increment_mdp_data_at(mdp, in_bytes(CounterData::count_offset()));

    // The method data pointer needs to be updated to reflect the new target.
    update_mdp_by_constant(mdp, in_bytes(CounterData::counter_data_size()));
    bind (profile_continue);
  }
}
1211 1214
1212 1215
// Profile a final call: bump the count, then skip a VirtualCallData-sized
// entry (no receiver rows are recorded for a final target).
void InterpreterMacroAssembler::profile_final_call(Register mdp) {
  if (ProfileInterpreter) {
    Label profile_continue;

    // If no method data exists, go to profile_continue.
    test_method_data_pointer(mdp, profile_continue);

    // We are making a call. Increment the count.
    increment_mdp_data_at(mdp, in_bytes(CounterData::count_offset()));

    // The method data pointer needs to be updated to reflect the new target.
    update_mdp_by_constant(mdp, in_bytes(VirtualCallData::virtual_call_data_size()));
    bind (profile_continue);
  }
}
1228 1231
1229 1232
// Profile a virtual call: bump the count and record the receiver's klass
// in the VirtualCallData rows. When receiver_can_be_null is set, a null
// receiver skips the type profile (only the count is bumped).
// reg2 is a scratch register clobbered by the type recording.
void InterpreterMacroAssembler::profile_virtual_call(Register receiver, Register mdp,
                                                     Register reg2,
                                                     bool receiver_can_be_null) {
  if (ProfileInterpreter) {
    Label profile_continue;

    // If no method data exists, go to profile_continue.
    test_method_data_pointer(mdp, profile_continue);

    // We are making a call. Increment the count.
    increment_mdp_data_at(mdp, in_bytes(CounterData::count_offset()));

    Label skip_receiver_profile;
    if (receiver_can_be_null) {
      testptr(receiver, receiver);
      jcc(Assembler::zero, skip_receiver_profile);
    }

    // Record the receiver type.
    record_klass_in_profile(receiver, mdp, reg2);
    bind(skip_receiver_profile);

    // The method data pointer needs to be updated to reflect the new target.
    update_mdp_by_constant(mdp,
                           in_bytes(VirtualCallData::
                                    virtual_call_data_size()));
    bind(profile_continue);
  }
}
1259 1262
1260 1263
// Emit the row-matching code for receiver-type profiling. Compile-time
// recursion over the rows of a VirtualCallData: each row is tested for
// the receiver (hit => bump its count) and, on the first row of each
// recursion level, for null (empty slot => claim it at the fall-through
// below). 'done' is the shared exit label supplied by the caller.
void InterpreterMacroAssembler::record_klass_in_profile_helper(
                                        Register receiver, Register mdp,
                                        Register reg2,
                                        int start_row, Label& done) {
  if (TypeProfileWidth == 0)
    return;

  int last_row = VirtualCallData::row_limit() - 1;
  assert(start_row <= last_row, "must be work left to do");
  // Test this row for both the receiver and for null.
  // Take any of three different outcomes:
  //   1. found receiver => increment count and goto done
  //   2. found null => keep looking for case 1, maybe allocate this cell
  //   3. found something else => keep looking for cases 1 and 2
  // Case 3 is handled by a recursive call.
  for (int row = start_row; row <= last_row; row++) {
    Label next_test;
    bool test_for_null_also = (row == start_row);

    // See if the receiver is receiver[n].
    int recvr_offset = in_bytes(VirtualCallData::receiver_offset(row));
    test_mdp_data_at(mdp, recvr_offset, receiver,
                     (test_for_null_also ? reg2 : noreg),
                     next_test);
    // (Reg2 now contains the receiver from the CallData.)

    // The receiver is receiver[n].  Increment count[n].
    int count_offset = in_bytes(VirtualCallData::receiver_count_offset(row));
    increment_mdp_data_at(mdp, count_offset);
    jmp(done);
    bind(next_test);

    if (row == start_row) {
      // Failed the equality check on receiver[n]...  Test for null.
      testptr(reg2, reg2);
      if (start_row == last_row) {
        // The only thing left to do is handle the null case.
        jcc(Assembler::notZero, done);
        break;
      }
      // Since null is rare, make it be the branch-taken case.
      Label found_null;
      jcc(Assembler::zero, found_null);

      // Put all the "Case 3" tests here.
      record_klass_in_profile_helper(receiver, mdp, reg2, start_row + 1, done);

      // Found a null.  Keep searching for a matching receiver,
      // but remember that this is an empty (unused) slot.
      bind(found_null);
    }
  }

  // In the fall-through case, we found no matching receiver, but we
  // observed the receiver[start_row] is NULL.

  // Fill in the receiver field and increment the count.
  int recvr_offset = in_bytes(VirtualCallData::receiver_offset(start_row));
  set_mdp_data_at(mdp, recvr_offset, receiver);
  int count_offset = in_bytes(VirtualCallData::receiver_count_offset(start_row));
  movptr(reg2, (int32_t)DataLayout::counter_increment);
  set_mdp_data_at(mdp, count_offset, reg2);
  jmp(done);
}
1325 1328
// Record 'receiver' in the type-profile rows at mdp, starting the row
// search at row 0. reg2 is a scratch register clobbered by the helper.
void InterpreterMacroAssembler::record_klass_in_profile(Register receiver,
                                                        Register mdp,
                                                        Register reg2) {
  assert(ProfileInterpreter, "must be profiling");
  Label done;

  record_klass_in_profile_helper(receiver, mdp, reg2, 0, done);

  bind (done);
}
1336 1339
1337 1340 void InterpreterMacroAssembler::profile_ret(Register return_bci, Register mdp) {
1338 1341 if (ProfileInterpreter) {
1339 1342 Label profile_continue;
1340 1343 uint row;
1341 1344
1342 1345 // If no method data exists, go to profile_continue.
1343 1346 test_method_data_pointer(mdp, profile_continue);
1344 1347
1345 1348 // Update the total ret count.
1346 1349 increment_mdp_data_at(mdp, in_bytes(CounterData::count_offset()));
1347 1350
1348 1351 for (row = 0; row < RetData::row_limit(); row++) {
1349 1352 Label next_test;
1350 1353
1351 1354 // See if return_bci is equal to bci[n]:
1352 1355 test_mdp_data_at(mdp, in_bytes(RetData::bci_offset(row)), return_bci,
1353 1356 noreg, next_test);
1354 1357
1355 1358 // return_bci is equal to bci[n]. Increment the count.
1356 1359 increment_mdp_data_at(mdp, in_bytes(RetData::bci_count_offset(row)));
1357 1360
1358 1361 // The method data pointer needs to be updated to reflect the new target.
1359 1362 update_mdp_by_offset(mdp, in_bytes(RetData::bci_displacement_offset(row)));
1360 1363 jmp(profile_continue);
1361 1364 bind(next_test);
1362 1365 }
1363 1366
1364 1367 update_mdp_for_ret(return_bci);
1365 1368
1366 1369 bind (profile_continue);
1367 1370 }
1368 1371 }
1369 1372
1370 1373
// Profile a null being seen at a type check: set the null_seen flag in the
// BitData header and advance mdp past this entry (a larger entry when
// TypeProfileCasts records receiver types here).
void InterpreterMacroAssembler::profile_null_seen(Register mdp) {
  if (ProfileInterpreter) {
    Label profile_continue;

    // If no method data exists, go to profile_continue.
    test_method_data_pointer(mdp, profile_continue);

    set_mdp_flag_at(mdp, BitData::null_seen_byte_constant());

    // The method data pointer needs to be updated.
    int mdp_delta = in_bytes(BitData::bit_data_size());
    if (TypeProfileCasts) {
      mdp_delta = in_bytes(VirtualCallData::virtual_call_data_size());
    }
    update_mdp_by_constant(mdp, mdp_delta);

    bind (profile_continue);
  }
}
1390 1393
1391 1394
// Undo the optimistic count bump made by profile_typecheck when the check
// subsequently fails. Only relevant when TypeProfileCasts is on.
void InterpreterMacroAssembler::profile_typecheck_failed(Register mdp) {
  if (ProfileInterpreter && TypeProfileCasts) {
    Label profile_continue;

    // If no method data exists, go to profile_continue.
    test_method_data_pointer(mdp, profile_continue);

    int count_offset = in_bytes(CounterData::count_offset());
    // Back up the address, since we have already bumped the mdp.
    count_offset -= in_bytes(VirtualCallData::virtual_call_data_size());

    // *Decrement* the counter.  We expect to see zero or small negatives.
    increment_mdp_data_at(mdp, count_offset, true);

    bind (profile_continue);
  }
}
1409 1412
1410 1413
// Profile a checkcast/instanceof: when TypeProfileCasts is on, record the
// checked klass in the profile's receiver rows (clobbering reg2 == rdi,
// restored via restore_locals), then advance mdp past this entry.
void InterpreterMacroAssembler::profile_typecheck(Register mdp, Register klass, Register reg2)
{
  if (ProfileInterpreter) {
    Label profile_continue;

    // If no method data exists, go to profile_continue.
    test_method_data_pointer(mdp, profile_continue);

    // The method data pointer needs to be updated.
    int mdp_delta = in_bytes(BitData::bit_data_size());
    if (TypeProfileCasts) {
      mdp_delta = in_bytes(VirtualCallData::virtual_call_data_size());

      // Record the object type.
      record_klass_in_profile(klass, mdp, reg2);
      assert(reg2 == rdi, "we know how to fix this blown reg");
      restore_locals();         // Restore EDI
    }
    update_mdp_by_constant(mdp, mdp_delta);

    bind(profile_continue);
  }
}
1434 1437
1435 1438
// Profile a switch falling through to its default target: bump the
// default-case count and follow the default displacement.
void InterpreterMacroAssembler::profile_switch_default(Register mdp) {
  if (ProfileInterpreter) {
    Label profile_continue;

    // If no method data exists, go to profile_continue.
    test_method_data_pointer(mdp, profile_continue);

    // Update the default case count
    increment_mdp_data_at(mdp, in_bytes(MultiBranchData::default_count_offset()));

    // The method data pointer needs to be updated.
    update_mdp_by_offset(mdp, in_bytes(MultiBranchData::default_displacement_offset()));

    bind (profile_continue);
  }
}
1452 1455
1453 1456
// Profile a matched switch case: compute the byte offset of case 'index'
// inside the MultiBranchData case array (clobbering index and reg2), bump
// that case's count, and follow its displacement.
void InterpreterMacroAssembler::profile_switch_case(Register index, Register mdp, Register reg2) {
  if (ProfileInterpreter) {
    Label profile_continue;

    // If no method data exists, go to profile_continue.
    test_method_data_pointer(mdp, profile_continue);

    // Build the base (index * per_case_size_in_bytes()) + case_array_offset_in_bytes()
    movptr(reg2, (int32_t)in_bytes(MultiBranchData::per_case_size()));
    // index is positive and so should have correct value if this code were
    // used on 64bits
    imulptr(index, reg2);
    addptr(index, in_bytes(MultiBranchData::case_array_offset()));

    // Update the case count
    increment_mdp_data_at(mdp, index, in_bytes(MultiBranchData::relative_count_offset()));

    // The method data pointer needs to be updated.
    update_mdp_by_offset(mdp, index, in_bytes(MultiBranchData::relative_displacement_offset()));

    bind (profile_continue);
  }
}
1477 1480
1478 1481 #endif // !CC_INTERP
1479 1482
1480 1483
1481 1484
1482 1485 void InterpreterMacroAssembler::verify_oop(Register reg, TosState state) {
1483 1486 if (state == atos) MacroAssembler::verify_oop(reg);
1484 1487 }
1485 1488
1486 1489
1487 1490 #ifndef CC_INTERP
1488 1491 void InterpreterMacroAssembler::verify_FPU(int stack_depth, TosState state) {
1489 1492 if (state == ftos || state == dtos) MacroAssembler::verify_FPU(stack_depth);
1490 1493 }
1491 1494
#endif /* !CC_INTERP */
1493 1496
1494 1497
// Emit the method-entry notification sequence: a conditional JVMTI
// post_method_entry (only when this thread is in interp_only_mode), a
// DTrace method-entry probe, and RedefineClasses tracing for obsolete
// method entry. Clobbers rcx and rbx.
void InterpreterMacroAssembler::notify_method_entry() {
  // Whenever JVMTI is interp_only_mode, method entry/exit events are sent to
  // track stack depth.  If it is possible to enter interp_only_mode we add
  // the code to check if the event should be sent.
  if (JvmtiExport::can_post_interpreter_events()) {
    Label L;
    get_thread(rcx);
    movl(rcx, Address(rcx, JavaThread::interp_only_mode_offset()));
    testl(rcx,rcx);
    jcc(Assembler::zero, L);
    call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_method_entry));
    bind(L);
  }

  {
    // DTrace probe, skipped entirely when DTraceMethodProbes is off.
    SkipIfEqual skip_if(this, &DTraceMethodProbes, 0);
    get_thread(rcx);
    get_method(rbx);
    call_VM_leaf(
      CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_entry), rcx, rbx);
  }

  // RedefineClasses() tracing support for obsolete method entry
  if (RC_TRACE_IN_RANGE(0x00001000, 0x00002000)) {
    get_thread(rcx);
    get_method(rbx);
    call_VM_leaf(
      CAST_FROM_FN_PTR(address, SharedRuntime::rc_trace_method_entry),
      rcx, rbx);
  }
}
1526 1529
1527 1530
// Emit the method-exit notification sequence: a conditional JVMTI
// post_method_exit (only in NotifyJVMTI mode and when this thread is in
// interp_only_mode) and a DTrace method-exit probe. The TOS value is
// pushed/popped around each call so it survives (template interpreter
// only; the C++ interpreter keeps the result in the frame).
void InterpreterMacroAssembler::notify_method_exit(
    TosState state, NotifyMethodExitMode mode) {
  // Whenever JVMTI is interp_only_mode, method entry/exit events are sent to
  // track stack depth.  If it is possible to enter interp_only_mode we add
  // the code to check if the event should be sent.
  if (mode == NotifyJVMTI && JvmtiExport::can_post_interpreter_events()) {
    Label L;
    // Note: frame::interpreter_frame_result has a dependency on how the
    // method result is saved across the call to post_method_exit. If this
    // is changed then the interpreter_frame_result implementation will
    // need to be updated too.

    // For c++ interpreter the result is always stored at a known location in the frame
    // template interpreter will leave it on the top of the stack.
    NOT_CC_INTERP(push(state);)
    get_thread(rcx);
    movl(rcx, Address(rcx, JavaThread::interp_only_mode_offset()));
    testl(rcx,rcx);
    jcc(Assembler::zero, L);
    call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_method_exit));
    bind(L);
    NOT_CC_INTERP(pop(state);)
  }

  {
    // DTrace probe, skipped entirely when DTraceMethodProbes is off.
    SkipIfEqual skip_if(this, &DTraceMethodProbes, 0);
    NOT_CC_INTERP(push(state));
    get_thread(rbx);
    get_method(rcx);
    call_VM_leaf(
      CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit),
      rbx, rcx);
    NOT_CC_INTERP(pop(state));
  }
}
↓ open down ↓ |
1354 lines elided |
↑ open up ↑ |
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX