rev 1021 : 6858164: invokedynamic code needs some cleanup (post-6655638)
Note: The bug ID for this change set was erroneously used to call for review of 6815692.
Summary: Fix several crashers, remove needless paths for boxed-style bootstrap method call, refactor & simplify APIs for rewriter constantPoolOop, remove sun.dyn.CallSiteImpl
Reviewed-by: ?
rev 1022 : 6829192: JSR 292 needs to support 64-bit x86
Summary: changes for method handles and invokedynamic
Reviewed-by: ?, ?
--- old/src/cpu/x86/vm/templateTable_x86_32.cpp
+++ new/src/cpu/x86/vm/templateTable_x86_32.cpp
1 1 /*
2 2 * Copyright 1997-2009 Sun Microsystems, Inc. All Rights Reserved.
3 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 4 *
5 5 * This code is free software; you can redistribute it and/or modify it
6 6 * under the terms of the GNU General Public License version 2 only, as
7 7 * published by the Free Software Foundation.
8 8 *
9 9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 12 * version 2 for more details (a copy is included in the LICENSE file that
13 13 * accompanied this code).
14 14 *
15 15 * You should have received a copy of the GNU General Public License version
16 16 * 2 along with this work; if not, write to the Free Software Foundation,
17 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 18 *
19 19 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
20 20 * CA 95054 USA or visit www.sun.com if you need additional information or
21 21 * have any questions.
22 22 *
23 23 */
24 24
25 25 #include "incls/_precompiled.incl"
26 26 #include "incls/_templateTable_x86_32.cpp.incl"
27 27
28 28 #ifndef CC_INTERP
29 29 #define __ _masm->
30 30
31 31 //----------------------------------------------------------------------------------------------------
32 32 // Platform-dependent initialization
33 33
34 34 void TemplateTable::pd_initialize() {
35 35 // No i486 specific initialization
36 36 }
37 37
38 38 //----------------------------------------------------------------------------------------------------
39 39 // Address computation
40 40
41 41 // local variables
42 42 static inline Address iaddress(int n) {
43 43 return Address(rdi, Interpreter::local_offset_in_bytes(n));
44 44 }
45 45
46 46 static inline Address laddress(int n) { return iaddress(n + 1); }
47 47 static inline Address haddress(int n) { return iaddress(n + 0); }
48 48 static inline Address faddress(int n) { return iaddress(n); }
49 49 static inline Address daddress(int n) { return laddress(n); }
50 50 static inline Address aaddress(int n) { return iaddress(n); }
51 51
52 52 static inline Address iaddress(Register r) {
53 53 return Address(rdi, r, Interpreter::stackElementScale(), Interpreter::value_offset_in_bytes());
54 54 }
55 55 static inline Address laddress(Register r) {
56 56 return Address(rdi, r, Interpreter::stackElementScale(), Interpreter::local_offset_in_bytes(1));
57 57 }
58 58 static inline Address haddress(Register r) {
59 59 return Address(rdi, r, Interpreter::stackElementScale(), Interpreter::local_offset_in_bytes(0));
60 60 }
61 61
62 62 static inline Address faddress(Register r) { return iaddress(r); };
63 63 static inline Address daddress(Register r) {
64 64 assert(!TaggedStackInterpreter, "This doesn't work");
65 65 return laddress(r);
66 66 };
67 67 static inline Address aaddress(Register r) { return iaddress(r); };
68 68
69 69 // expression stack
70 70 // (Note: Must not use symmetric equivalents at_rsp_m1/2 since they store
71 71 // data beyond the rsp which is potentially unsafe in an MT environment;
72 72 // an interrupt may overwrite that data.)
73 73 static inline Address at_rsp () {
74 74 return Address(rsp, 0);
75 75 }
76 76
77 77 // At the top of the Java expression stack, which may be different from rsp()
78 78 // (it isn't different for category 1 values).
79 79 static inline Address at_tos () {
80 80 Address tos = Address(rsp, Interpreter::expr_offset_in_bytes(0));
81 81 return tos;
82 82 }
83 83
84 84 static inline Address at_tos_p1() {
85 85 return Address(rsp, Interpreter::expr_offset_in_bytes(1));
86 86 }
87 87
88 88 static inline Address at_tos_p2() {
89 89 return Address(rsp, Interpreter::expr_offset_in_bytes(2));
90 90 }
91 91
92 92 // Condition conversion
93 93 static Assembler::Condition j_not(TemplateTable::Condition cc) {
94 94 switch (cc) {
95 95 case TemplateTable::equal : return Assembler::notEqual;
96 96 case TemplateTable::not_equal : return Assembler::equal;
97 97 case TemplateTable::less : return Assembler::greaterEqual;
98 98 case TemplateTable::less_equal : return Assembler::greater;
99 99 case TemplateTable::greater : return Assembler::lessEqual;
100 100 case TemplateTable::greater_equal: return Assembler::less;
101 101 }
102 102 ShouldNotReachHere();
103 103 return Assembler::zero;
104 104 }
105 105
106 106
107 107 //----------------------------------------------------------------------------------------------------
108 108 // Miscellaneous helper routines
109 109
110 110 // Store an oop (or NULL) at the address described by obj.
111 111 // If val == noreg this means store a NULL
112 112
113 113 static void do_oop_store(InterpreterMacroAssembler* _masm,
114 114 Address obj,
115 115 Register val,
116 116 BarrierSet::Name barrier,
117 117 bool precise) {
118 118 assert(val == noreg || val == rax, "parameter is just for looks");
119 119 switch (barrier) {
120 120 #ifndef SERIALGC
121 121 case BarrierSet::G1SATBCT:
122 122 case BarrierSet::G1SATBCTLogging:
123 123 {
124 124 // flatten object address if needed
125 125 // We do it regardless of precise because we need the registers
126 126 if (obj.index() == noreg && obj.disp() == 0) {
127 127 if (obj.base() != rdx) {
128 128 __ movl(rdx, obj.base());
129 129 }
130 130 } else {
131 131 __ leal(rdx, obj);
132 132 }
133 133 __ get_thread(rcx);
134 134 __ save_bcp();
135 135 __ g1_write_barrier_pre(rdx, rcx, rsi, rbx, val != noreg);
136 136
137 137 // Do the actual store
138 138 // noreg means NULL
139 139 if (val == noreg) {
140 140 __ movptr(Address(rdx, 0), NULL_WORD);
141 141 // No post barrier for NULL
142 142 } else {
143 143 __ movl(Address(rdx, 0), val);
144 144 __ g1_write_barrier_post(rdx, rax, rcx, rbx, rsi);
145 145 }
146 146 __ restore_bcp();
147 147
148 148 }
149 149 break;
150 150 #endif // SERIALGC
151 151 case BarrierSet::CardTableModRef:
152 152 case BarrierSet::CardTableExtension:
153 153 {
154 154 if (val == noreg) {
155 155 __ movptr(obj, NULL_WORD);
156 156 } else {
157 157 __ movl(obj, val);
158 158 // flatten object address if needed
159 159 if (!precise || (obj.index() == noreg && obj.disp() == 0)) {
160 160 __ store_check(obj.base());
161 161 } else {
162 162 __ leal(rdx, obj);
163 163 __ store_check(rdx);
164 164 }
165 165 }
166 166 }
167 167 break;
168 168 case BarrierSet::ModRef:
169 169 case BarrierSet::Other:
170 170 if (val == noreg) {
171 171 __ movptr(obj, NULL_WORD);
172 172 } else {
173 173 __ movl(obj, val);
174 174 }
175 175 break;
176 176 default :
177 177 ShouldNotReachHere();
178 178
179 179 }
180 180 }
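Reviewer note: for the CardTableModRef/CardTableExtension arms above, store_check() dirties the card covering the updated location so the next GC scans that region. A hedged, self-contained sketch of the classic byte-map update (the 512-byte card size matches HotSpot's default, but the table and constants here are illustrative, not the VM's real ones):

    #include <cstdint>

    static uint8_t card_table[1u << 20];     // one byte per 512-byte card (toy table)
    static const int     card_shift = 9;     // log2(512)
    static const uint8_t dirty_card = 0;

    static void store_check(uintptr_t field_addr) {
      // equivalent of: movb $0, card_table(field_addr >> 9)
      card_table[field_addr >> card_shift] = dirty_card;
    }

    int main() {
      store_check(0x1234);                   // dirties card 0x1234 >> 9 == 9
      return card_table[9] == dirty_card ? 0 : 1;
    }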
181 181
182 182 Address TemplateTable::at_bcp(int offset) {
183 183 assert(_desc->uses_bcp(), "inconsistent uses_bcp information");
184 184 return Address(rsi, offset);
185 185 }
186 186
187 187
188 188 void TemplateTable::patch_bytecode(Bytecodes::Code bytecode, Register bc,
189 189 Register scratch,
190 190 bool load_bc_into_scratch/*=true*/) {
191 191
192 192 if (!RewriteBytecodes) return;
193 193 // the pair bytecodes have already done the load.
194 194 if (load_bc_into_scratch) {
195 195 __ movl(bc, bytecode);
196 196 }
197 197 Label patch_done;
198 198 if (JvmtiExport::can_post_breakpoint()) {
199 199 Label fast_patch;
200 200 // if a breakpoint is present we can't rewrite the stream directly
201 201 __ movzbl(scratch, at_bcp(0));
202 202 __ cmpl(scratch, Bytecodes::_breakpoint);
203 203 __ jcc(Assembler::notEqual, fast_patch);
204 204 __ get_method(scratch);
205 205 // Let breakpoint table handling rewrite to quicker bytecode
206 206 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::set_original_bytecode_at), scratch, rsi, bc);
207 207 #ifndef ASSERT
208 208 __ jmpb(patch_done);
209 209 #else
210 210 __ jmp(patch_done);
211 211 #endif
212 212 __ bind(fast_patch);
213 213 }
214 214 #ifdef ASSERT
215 215 Label okay;
216 216 __ load_unsigned_byte(scratch, at_bcp(0));
217 217 __ cmpl(scratch, (int)Bytecodes::java_code(bytecode));
218 218 __ jccb(Assembler::equal, okay);
219 219 __ cmpl(scratch, bc);
220 220 __ jcc(Assembler::equal, okay);
221 221 __ stop("patching the wrong bytecode");
222 222 __ bind(okay);
223 223 #endif
224 224 // patch bytecode
225 225 __ movb(at_bcp(0), bc);
226 226 __ bind(patch_done);
227 227 }
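Reviewer note: patch_bytecode() is the interpreter's "quickening" hook: after a slow template has done its one-time resolution work, the opcode byte at bcp is overwritten (the final movb above) so later executions dispatch straight to the fast variant. A minimal sketch with made-up opcode values:

    #include <cstdint>
    #include <cassert>

    int main() {
      const uint8_t SLOW = 0x19, FAST = 0xCB;      // hypothetical opcodes
      uint8_t code[] = { SLOW, 0x01 };             // opcode + operand byte
      if (code[0] == SLOW) code[0] = FAST;         // the one-byte in-place patch
      assert(code[0] == FAST && code[1] == 0x01);  // operand left untouched
    }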
228 228
229 229 //----------------------------------------------------------------------------------------------------
230 230 // Individual instructions
231 231
232 232 void TemplateTable::nop() {
233 233 transition(vtos, vtos);
234 234 // nothing to do
235 235 }
236 236
237 237 void TemplateTable::shouldnotreachhere() {
238 238 transition(vtos, vtos);
239 239 __ stop("shouldnotreachhere bytecode");
240 240 }
241 241
242 242
243 243
244 244 void TemplateTable::aconst_null() {
245 245 transition(vtos, atos);
246 246 __ xorptr(rax, rax);
247 247 }
248 248
249 249
250 250 void TemplateTable::iconst(int value) {
251 251 transition(vtos, itos);
252 252 if (value == 0) {
253 253 __ xorptr(rax, rax);
254 254 } else {
255 255 __ movptr(rax, value);
256 256 }
257 257 }
258 258
259 259
260 260 void TemplateTable::lconst(int value) {
261 261 transition(vtos, ltos);
262 262 if (value == 0) {
263 263 __ xorptr(rax, rax);
264 264 } else {
265 265 __ movptr(rax, value);
266 266 }
267 267 assert(value >= 0, "check this code");
268 268 __ xorptr(rdx, rdx);
269 269 }
270 270
271 271
272 272 void TemplateTable::fconst(int value) {
273 273 transition(vtos, ftos);
274 274 if (value == 0) { __ fldz();
275 275 } else if (value == 1) { __ fld1();
276 276 } else if (value == 2) { __ fld1(); __ fld1(); __ faddp(); // should find a better solution here
277 277 } else { ShouldNotReachHere();
278 278 }
279 279 }
280 280
281 281
282 282 void TemplateTable::dconst(int value) {
283 283 transition(vtos, dtos);
284 284 if (value == 0) { __ fldz();
285 285 } else if (value == 1) { __ fld1();
286 286 } else { ShouldNotReachHere();
287 287 }
288 288 }
289 289
290 290
291 291 void TemplateTable::bipush() {
292 292 transition(vtos, itos);
293 293 __ load_signed_byte(rax, at_bcp(1));
294 294 }
295 295
296 296
297 297 void TemplateTable::sipush() {
298 298 transition(vtos, itos);
299 299 __ load_unsigned_short(rax, at_bcp(1));
300 300 __ bswapl(rax);
301 301 __ sarl(rax, 16);
302 302 }
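Reviewer note: sipush's load_unsigned_short / bswapl / sarl sequence decodes the big-endian signed 16-bit operand on a little-endian machine. The same computation in portable C++ (example bytes only):

    #include <cstdint>
    #include <cassert>

    static int32_t decode_s2(const uint8_t* bcp) {
      uint32_t r = (uint32_t)bcp[1] << 8 | bcp[0];     // movzwl: little-endian load
      r = ((r & 0xFFu) << 24) | ((r & 0xFF00u) << 8);  // bswapl (upper bytes were zero)
      return (int32_t)r >> 16;                         // sarl: shift down, sign-extend
    }

    int main() {
      const uint8_t operand[] = { 0xFF, 0xFE };        // big-endian -2
      assert(decode_s2(operand) == -2);
    }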
303 303
304 304 void TemplateTable::ldc(bool wide) {
305 305 transition(vtos, vtos);
306 306 Label call_ldc, notFloat, notClass, Done;
307 307
308 308 if (wide) {
309 309 __ get_unsigned_2_byte_index_at_bcp(rbx, 1);
310 310 } else {
311 311 __ load_unsigned_byte(rbx, at_bcp(1));
312 312 }
313 313 __ get_cpool_and_tags(rcx, rax);
314 314 const int base_offset = constantPoolOopDesc::header_size() * wordSize;
315 315 const int tags_offset = typeArrayOopDesc::header_size(T_BYTE) * wordSize;
316 316
317 317 // get type
318 318 __ xorptr(rdx, rdx);
319 319 __ movb(rdx, Address(rax, rbx, Address::times_1, tags_offset));
320 320
321 321 // unresolved string - get the resolved string
322 322 __ cmpl(rdx, JVM_CONSTANT_UnresolvedString);
323 323 __ jccb(Assembler::equal, call_ldc);
324 324
325 325 // unresolved class - get the resolved class
326 326 __ cmpl(rdx, JVM_CONSTANT_UnresolvedClass);
327 327 __ jccb(Assembler::equal, call_ldc);
328 328
329 329 // unresolved class in error (resolution failed) - call into runtime
330 330 // so that the same error from first resolution attempt is thrown.
331 331 __ cmpl(rdx, JVM_CONSTANT_UnresolvedClassInError);
332 332 __ jccb(Assembler::equal, call_ldc);
333 333
334 334 // resolved class - need to call vm to get java mirror of the class
335 335 __ cmpl(rdx, JVM_CONSTANT_Class);
336 336 __ jcc(Assembler::notEqual, notClass);
337 337
338 338 __ bind(call_ldc);
339 339 __ movl(rcx, wide);
340 340 call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::ldc), rcx);
341 341 __ push(atos);
342 342 __ jmp(Done);
343 343
344 344 __ bind(notClass);
345 345 __ cmpl(rdx, JVM_CONSTANT_Float);
346 346 __ jccb(Assembler::notEqual, notFloat);
347 347 // ftos
348 348 __ fld_s( Address(rcx, rbx, Address::times_ptr, base_offset));
349 349 __ push(ftos);
350 350 __ jmp(Done);
351 351
352 352 __ bind(notFloat);
353 353 #ifdef ASSERT
354 354 { Label L;
355 355 __ cmpl(rdx, JVM_CONSTANT_Integer);
356 356 __ jcc(Assembler::equal, L);
357 357 __ cmpl(rdx, JVM_CONSTANT_String);
358 358 __ jcc(Assembler::equal, L);
359 359 __ stop("unexpected tag type in ldc");
360 360 __ bind(L);
361 361 }
362 362 #endif
363 363 Label isOop;
364 364 // atos and itos
365 365 // String is only oop type we will see here
366 366 __ cmpl(rdx, JVM_CONSTANT_String);
367 367 __ jccb(Assembler::equal, isOop);
368 368 __ movl(rax, Address(rcx, rbx, Address::times_ptr, base_offset));
369 369 __ push(itos);
370 370 __ jmp(Done);
371 371 __ bind(isOop);
372 372 __ movptr(rax, Address(rcx, rbx, Address::times_ptr, base_offset));
373 373 __ push(atos);
374 374
375 375 if (VerifyOops) {
376 376 __ verify_oop(rax);
377 377 }
378 378 __ bind(Done);
379 379 }
380 380
381 381 void TemplateTable::ldc2_w() {
382 382 transition(vtos, vtos);
383 383 Label Long, Done;
384 384 __ get_unsigned_2_byte_index_at_bcp(rbx, 1);
385 385
386 386 __ get_cpool_and_tags(rcx, rax);
387 387 const int base_offset = constantPoolOopDesc::header_size() * wordSize;
388 388 const int tags_offset = typeArrayOopDesc::header_size(T_BYTE) * wordSize;
389 389
390 390 // get type
391 391 __ cmpb(Address(rax, rbx, Address::times_1, tags_offset), JVM_CONSTANT_Double);
392 392 __ jccb(Assembler::notEqual, Long);
393 393 // dtos
394 394 __ fld_d( Address(rcx, rbx, Address::times_ptr, base_offset));
395 395 __ push(dtos);
396 396 __ jmpb(Done);
397 397
398 398 __ bind(Long);
399 399 // ltos
400 400 __ movptr(rax, Address(rcx, rbx, Address::times_ptr, base_offset + 0 * wordSize));
401 401 NOT_LP64(__ movptr(rdx, Address(rcx, rbx, Address::times_ptr, base_offset + 1 * wordSize)));
402 402
403 403 __ push(ltos);
404 404
405 405 __ bind(Done);
406 406 }
407 407
408 408
409 409 void TemplateTable::locals_index(Register reg, int offset) {
410 410 __ load_unsigned_byte(reg, at_bcp(offset));
411 411 __ negptr(reg);
412 412 }
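Reviewer note: the negptr() is there because locals live at decreasing addresses below the locals pointer in rdi, so iaddress(rbx) can use an ordinary scaled-index addressing mode with a negated index. Illustrative arithmetic only (the real offset helper also folds in the tagged-stack layout):

    #include <cstdint>
    #include <cassert>

    int main() {
      const intptr_t wordSize = 4;    // 32-bit VM word
      intptr_t rdi = 0x1000;          // hypothetical locals base
      for (intptr_t n = 0; n < 4; n++) {
        intptr_t index = -n;          // what locals_index() leaves in the register
        // Address(rdi, index, times_4) computes rdi + index*wordSize
        assert(rdi + index * wordSize == rdi - n * wordSize);
      }
    }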
413 413
414 414
415 415 void TemplateTable::iload() {
416 416 transition(vtos, itos);
417 417 if (RewriteFrequentPairs) {
418 418 Label rewrite, done;
419 419
420 420 // get next byte
421 421 __ load_unsigned_byte(rbx, at_bcp(Bytecodes::length_for(Bytecodes::_iload)));
422 422 // if _iload, wait to rewrite to _fast_iload2. We only want to rewrite the
423 423 // last two iloads in a pair. Comparing against fast_iload means that
424 424 // the next bytecode is neither an iload nor a caload, and therefore
425 425 // an iload pair.
426 426 __ cmpl(rbx, Bytecodes::_iload);
427 427 __ jcc(Assembler::equal, done);
428 428
429 429 __ cmpl(rbx, Bytecodes::_fast_iload);
430 430 __ movl(rcx, Bytecodes::_fast_iload2);
431 431 __ jccb(Assembler::equal, rewrite);
432 432
433 433 // if _caload, rewrite to fast_icaload
434 434 __ cmpl(rbx, Bytecodes::_caload);
435 435 __ movl(rcx, Bytecodes::_fast_icaload);
436 436 __ jccb(Assembler::equal, rewrite);
437 437
438 438 // rewrite so iload doesn't check again.
439 439 __ movl(rcx, Bytecodes::_fast_iload);
440 440
441 441 // rewrite
442 442 // rcx: fast bytecode
443 443 __ bind(rewrite);
444 444 patch_bytecode(Bytecodes::_iload, rcx, rbx, false);
445 445 __ bind(done);
446 446 }
447 447
448 448 // Get the local value into tos
449 449 locals_index(rbx);
450 450 __ movl(rax, iaddress(rbx));
451 451 debug_only(__ verify_local_tag(frame::TagValue, rbx));
452 452 }
453 453
454 454
455 455 void TemplateTable::fast_iload2() {
456 456 transition(vtos, itos);
457 457 locals_index(rbx);
458 458 __ movl(rax, iaddress(rbx));
459 459 debug_only(__ verify_local_tag(frame::TagValue, rbx));
460 460 __ push(itos);
461 461 locals_index(rbx, 3);
462 462 __ movl(rax, iaddress(rbx));
463 463 debug_only(__ verify_local_tag(frame::TagValue, rbx));
464 464 }
465 465
466 466 void TemplateTable::fast_iload() {
467 467 transition(vtos, itos);
468 468 locals_index(rbx);
469 469 __ movl(rax, iaddress(rbx));
470 470 debug_only(__ verify_local_tag(frame::TagValue, rbx));
471 471 }
472 472
473 473
474 474 void TemplateTable::lload() {
475 475 transition(vtos, ltos);
476 476 locals_index(rbx);
477 477 __ movptr(rax, laddress(rbx));
478 478 NOT_LP64(__ movl(rdx, haddress(rbx)));
479 479 debug_only(__ verify_local_tag(frame::TagCategory2, rbx));
480 480 }
481 481
482 482
483 483 void TemplateTable::fload() {
484 484 transition(vtos, ftos);
485 485 locals_index(rbx);
486 486 __ fld_s(faddress(rbx));
487 487 debug_only(__ verify_local_tag(frame::TagValue, rbx));
488 488 }
489 489
490 490
491 491 void TemplateTable::dload() {
492 492 transition(vtos, dtos);
493 493 locals_index(rbx);
494 494 if (TaggedStackInterpreter) {
495 495 // Get double out of locals array, onto temp stack and load with
496 496 // float instruction into ST0
497 497 __ movl(rax, laddress(rbx));
498 498 __ movl(rdx, haddress(rbx));
499 499 __ push(rdx); // push hi first
500 500 __ push(rax);
501 501 __ fld_d(Address(rsp, 0));
502 502 __ addptr(rsp, 2*wordSize);
503 503 debug_only(__ verify_local_tag(frame::TagCategory2, rbx));
504 504 } else {
505 505 __ fld_d(daddress(rbx));
506 506 }
507 507 }
508 508
509 509
510 510 void TemplateTable::aload() {
511 511 transition(vtos, atos);
512 512 locals_index(rbx);
513 513 __ movptr(rax, aaddress(rbx));
514 514 debug_only(__ verify_local_tag(frame::TagReference, rbx));
515 515 }
516 516
517 517
518 518 void TemplateTable::locals_index_wide(Register reg) {
519 519 __ movl(reg, at_bcp(2));
520 520 __ bswapl(reg);
521 521 __ shrl(reg, 16);
522 522 __ negptr(reg);
523 523 }
524 524
525 525
526 526 void TemplateTable::wide_iload() {
527 527 transition(vtos, itos);
528 528 locals_index_wide(rbx);
529 529 __ movl(rax, iaddress(rbx));
530 530 debug_only(__ verify_local_tag(frame::TagValue, rbx));
531 531 }
532 532
533 533
534 534 void TemplateTable::wide_lload() {
535 535 transition(vtos, ltos);
536 536 locals_index_wide(rbx);
537 537 __ movptr(rax, laddress(rbx));
538 538 NOT_LP64(__ movl(rdx, haddress(rbx)));
539 539 debug_only(__ verify_local_tag(frame::TagCategory2, rbx));
540 540 }
541 541
542 542
543 543 void TemplateTable::wide_fload() {
544 544 transition(vtos, ftos);
545 545 locals_index_wide(rbx);
546 546 __ fld_s(faddress(rbx));
547 547 debug_only(__ verify_local_tag(frame::TagValue, rbx));
548 548 }
549 549
550 550
551 551 void TemplateTable::wide_dload() {
552 552 transition(vtos, dtos);
553 553 locals_index_wide(rbx);
554 554 if (TaggedStackInterpreter) {
555 555 // Get double out of locals array, onto temp stack and load with
556 556 // float instruction into ST0
557 557 __ movl(rax, laddress(rbx));
558 558 __ movl(rdx, haddress(rbx));
559 559 __ push(rdx); // push hi first
560 560 __ push(rax);
561 561 __ fld_d(Address(rsp, 0));
562 562 __ addl(rsp, 2*wordSize);
563 563 debug_only(__ verify_local_tag(frame::TagCategory2, rbx));
564 564 } else {
565 565 __ fld_d(daddress(rbx));
566 566 }
567 567 }
568 568
569 569
570 570 void TemplateTable::wide_aload() {
571 571 transition(vtos, atos);
572 572 locals_index_wide(rbx);
573 573 __ movptr(rax, aaddress(rbx));
574 574 debug_only(__ verify_local_tag(frame::TagReference, rbx));
575 575 }
576 576
577 577 void TemplateTable::index_check(Register array, Register index) {
578 578 // Pop ptr into array
579 579 __ pop_ptr(array);
580 580 index_check_without_pop(array, index);
581 581 }
582 582
583 583 void TemplateTable::index_check_without_pop(Register array, Register index) {
584 584 // destroys rbx,
585 585 // check array
586 586 __ null_check(array, arrayOopDesc::length_offset_in_bytes());
587 587 LP64_ONLY(__ movslq(index, index));
588 588 // check index
589 589 __ cmpl(index, Address(array, arrayOopDesc::length_offset_in_bytes()));
590 590 if (index != rbx) {
591 591 // ??? convention: move aberrant index into rbx, for exception message
592 592 assert(rbx != array, "different registers");
593 593 __ mov(rbx, index);
594 594 }
595 595 __ jump_cc(Assembler::aboveEqual,
596 596 ExternalAddress(Interpreter::_throw_ArrayIndexOutOfBoundsException_entry));
597 597 }
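Reviewer note: the cmpl + jump_cc(aboveEqual) pair is one unsigned comparison doing double duty: a negative index reinterpreted as unsigned is huge, so it fails the same test as an index that is >= length. Equivalent logic:

    #include <cstdint>
    #include <cassert>

    static bool in_bounds(int32_t index, int32_t length) {
      return (uint32_t)index < (uint32_t)length;   // one compare catches both cases
    }

    int main() {
      assert(in_bounds(0, 4) && in_bounds(3, 4));
      assert(!in_bounds(4, 4));
      assert(!in_bounds(-1, 4));                   // wraps to 0xFFFFFFFF, fails too
    }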
598 598
599 599
600 600 void TemplateTable::iaload() {
601 601 transition(itos, itos);
602 602 // rdx: array
603 603 index_check(rdx, rax); // kills rbx,
604 604 // rax,: index
605 605 __ movl(rax, Address(rdx, rax, Address::times_4, arrayOopDesc::base_offset_in_bytes(T_INT)));
606 606 }
607 607
608 608
609 609 void TemplateTable::laload() {
610 610 transition(itos, ltos);
611 611 // rax,: index
612 612 // rdx: array
613 613 index_check(rdx, rax);
614 614 __ mov(rbx, rax);
615 615 // rbx,: index
616 616 __ movptr(rax, Address(rdx, rbx, Address::times_8, arrayOopDesc::base_offset_in_bytes(T_LONG) + 0 * wordSize));
617 617 NOT_LP64(__ movl(rdx, Address(rdx, rbx, Address::times_8, arrayOopDesc::base_offset_in_bytes(T_LONG) + 1 * wordSize)));
618 618 }
619 619
620 620
621 621 void TemplateTable::faload() {
622 622 transition(itos, ftos);
623 623 // rdx: array
624 624 index_check(rdx, rax); // kills rbx,
625 625 // rax,: index
626 626 __ fld_s(Address(rdx, rax, Address::times_4, arrayOopDesc::base_offset_in_bytes(T_FLOAT)));
627 627 }
628 628
629 629
630 630 void TemplateTable::daload() {
631 631 transition(itos, dtos);
632 632 // rdx: array
633 633 index_check(rdx, rax); // kills rbx,
634 634 // rax,: index
635 635 __ fld_d(Address(rdx, rax, Address::times_8, arrayOopDesc::base_offset_in_bytes(T_DOUBLE)));
636 636 }
637 637
638 638
639 639 void TemplateTable::aaload() {
640 640 transition(itos, atos);
641 641 // rdx: array
642 642 index_check(rdx, rax); // kills rbx,
643 643 // rax,: index
644 644 __ movptr(rax, Address(rdx, rax, Address::times_ptr, arrayOopDesc::base_offset_in_bytes(T_OBJECT)));
645 645 }
646 646
647 647
648 648 void TemplateTable::baload() {
649 649 transition(itos, itos);
650 650 // rdx: array
651 651 index_check(rdx, rax); // kills rbx,
652 652 // rax,: index
653 653 // can do better code for P5 - fix this at some point
654 654 __ load_signed_byte(rbx, Address(rdx, rax, Address::times_1, arrayOopDesc::base_offset_in_bytes(T_BYTE)));
655 655 __ mov(rax, rbx);
656 656 }
657 657
658 658
659 659 void TemplateTable::caload() {
660 660 transition(itos, itos);
661 661 // rdx: array
662 662 index_check(rdx, rax); // kills rbx,
663 663 // rax,: index
664 664 // can do better code for P5 - may want to improve this at some point
665 665 __ load_unsigned_short(rbx, Address(rdx, rax, Address::times_2, arrayOopDesc::base_offset_in_bytes(T_CHAR)));
666 666 __ mov(rax, rbx);
667 667 }
668 668
669 669 // iload followed by caload frequent pair
670 670 void TemplateTable::fast_icaload() {
671 671 transition(vtos, itos);
672 672 // load index out of locals
673 673 locals_index(rbx);
674 674 __ movl(rax, iaddress(rbx));
675 675 debug_only(__ verify_local_tag(frame::TagValue, rbx));
676 676
677 677 // rdx: array
678 678 index_check(rdx, rax);
679 679 // rax,: index
680 680 __ load_unsigned_short(rbx, Address(rdx, rax, Address::times_2, arrayOopDesc::base_offset_in_bytes(T_CHAR)));
681 681 __ mov(rax, rbx);
682 682 }
683 683
684 684 void TemplateTable::saload() {
685 685 transition(itos, itos);
686 686 // rdx: array
687 687 index_check(rdx, rax); // kills rbx,
688 688 // rax,: index
689 689 // can do better code for P5 - may want to improve this at some point
690 690 __ load_signed_short(rbx, Address(rdx, rax, Address::times_2, arrayOopDesc::base_offset_in_bytes(T_SHORT)));
691 691 __ mov(rax, rbx);
692 692 }
693 693
694 694
695 695 void TemplateTable::iload(int n) {
696 696 transition(vtos, itos);
697 697 __ movl(rax, iaddress(n));
698 698 debug_only(__ verify_local_tag(frame::TagValue, n));
699 699 }
700 700
701 701
702 702 void TemplateTable::lload(int n) {
703 703 transition(vtos, ltos);
704 704 __ movptr(rax, laddress(n));
705 705 NOT_LP64(__ movptr(rdx, haddress(n)));
706 706 debug_only(__ verify_local_tag(frame::TagCategory2, n));
707 707 }
708 708
709 709
710 710 void TemplateTable::fload(int n) {
711 711 transition(vtos, ftos);
712 712 __ fld_s(faddress(n));
713 713 debug_only(__ verify_local_tag(frame::TagValue, n));
714 714 }
715 715
716 716
717 717 void TemplateTable::dload(int n) {
718 718 transition(vtos, dtos);
719 719 if (TaggedStackInterpreter) {
720 720 // Get double out of locals array, onto temp stack and load with
721 721 // float instruction into ST0
722 722 __ movl(rax, laddress(n));
723 723 __ movl(rdx, haddress(n));
724 724 __ push(rdx); // push hi first
725 725 __ push(rax);
726 726 __ fld_d(Address(rsp, 0));
727 727 __ addptr(rsp, 2*wordSize); // reset rsp
728 728 debug_only(__ verify_local_tag(frame::TagCategory2, n));
729 729 } else {
730 730 __ fld_d(daddress(n));
731 731 }
732 732 }
733 733
734 734
735 735 void TemplateTable::aload(int n) {
736 736 transition(vtos, atos);
737 737 __ movptr(rax, aaddress(n));
738 738 debug_only(__ verify_local_tag(frame::TagReference, n));
739 739 }
740 740
741 741
742 742 void TemplateTable::aload_0() {
743 743 transition(vtos, atos);
744 744 // According to bytecode histograms, the pairs:
745 745 //
746 746 // _aload_0, _fast_igetfield
747 747 // _aload_0, _fast_agetfield
748 748 // _aload_0, _fast_fgetfield
749 749 //
750 750 // occur frequently. If RewriteFrequentPairs is set, the (slow) _aload_0
751 751 // bytecode checks if the next bytecode is either _fast_igetfield,
752 752 // _fast_agetfield or _fast_fgetfield and then rewrites the
753 753 // current bytecode into a pair bytecode; otherwise it rewrites the current
754 754 // bytecode into _fast_aload_0 that doesn't do the pair check anymore.
755 755 //
756 756 // Note: If the next bytecode is _getfield, the rewrite must be delayed,
757 757 // otherwise we may miss an opportunity for a pair.
758 758 //
759 759 // Also rewrite frequent pairs
760 760 // aload_0, aload_1
761 761 // aload_0, iload_1
762 762 // These pairs require only a small amount of code and are the most profitable to rewrite
763 763 if (RewriteFrequentPairs) {
764 764 Label rewrite, done;
765 765 // get next byte
766 766 __ load_unsigned_byte(rbx, at_bcp(Bytecodes::length_for(Bytecodes::_aload_0)));
767 767
768 768 // do actual aload_0
769 769 aload(0);
770 770
771 771 // if _getfield then wait with rewrite
772 772 __ cmpl(rbx, Bytecodes::_getfield);
773 773 __ jcc(Assembler::equal, done);
774 774
775 775 // if _igetfield then rewrite to _fast_iaccess_0
776 776 assert(Bytecodes::java_code(Bytecodes::_fast_iaccess_0) == Bytecodes::_aload_0, "fix bytecode definition");
777 777 __ cmpl(rbx, Bytecodes::_fast_igetfield);
778 778 __ movl(rcx, Bytecodes::_fast_iaccess_0);
779 779 __ jccb(Assembler::equal, rewrite);
780 780
781 781 // if _agetfield then rewrite to _fast_aaccess_0
782 782 assert(Bytecodes::java_code(Bytecodes::_fast_aaccess_0) == Bytecodes::_aload_0, "fix bytecode definition");
783 783 __ cmpl(rbx, Bytecodes::_fast_agetfield);
784 784 __ movl(rcx, Bytecodes::_fast_aaccess_0);
785 785 __ jccb(Assembler::equal, rewrite);
786 786
787 787 // if _fgetfield then rewrite to _fast_faccess_0
788 788 assert(Bytecodes::java_code(Bytecodes::_fast_faccess_0) == Bytecodes::_aload_0, "fix bytecode definition");
789 789 __ cmpl(rbx, Bytecodes::_fast_fgetfield);
790 790 __ movl(rcx, Bytecodes::_fast_faccess_0);
791 791 __ jccb(Assembler::equal, rewrite);
792 792
793 793 // else rewrite to _fast_aload0
794 794 assert(Bytecodes::java_code(Bytecodes::_fast_aload_0) == Bytecodes::_aload_0, "fix bytecode definition");
795 795 __ movl(rcx, Bytecodes::_fast_aload_0);
796 796
797 797 // rewrite
798 798 // rcx: fast bytecode
799 799 __ bind(rewrite);
800 800 patch_bytecode(Bytecodes::_aload_0, rcx, rbx, false);
801 801
802 802 __ bind(done);
803 803 } else {
804 804 aload(0);
805 805 }
806 806 }
807 807
808 808 void TemplateTable::istore() {
809 809 transition(itos, vtos);
810 810 locals_index(rbx);
811 811 __ movl(iaddress(rbx), rax);
812 812 __ tag_local(frame::TagValue, rbx);
813 813 }
814 814
815 815
816 816 void TemplateTable::lstore() {
817 817 transition(ltos, vtos);
818 818 locals_index(rbx);
819 819 __ movptr(laddress(rbx), rax);
820 820 NOT_LP64(__ movptr(haddress(rbx), rdx));
821 821 __ tag_local(frame::TagCategory2, rbx);
822 822 }
823 823
824 824
825 825 void TemplateTable::fstore() {
826 826 transition(ftos, vtos);
827 827 locals_index(rbx);
828 828 __ fstp_s(faddress(rbx));
829 829 __ tag_local(frame::TagValue, rbx);
830 830 }
831 831
832 832
833 833 void TemplateTable::dstore() {
834 834 transition(dtos, vtos);
835 835 locals_index(rbx);
836 836 if (TaggedStackInterpreter) {
837 837 // Store double on stack and reload into locals nonadjacently
838 838 __ subptr(rsp, 2 * wordSize);
839 839 __ fstp_d(Address(rsp, 0));
840 840 __ pop(rax);
841 841 __ pop(rdx);
842 842 __ movptr(laddress(rbx), rax);
843 843 __ movptr(haddress(rbx), rdx);
844 844 __ tag_local(frame::TagCategory2, rbx);
845 845 } else {
846 846 __ fstp_d(daddress(rbx));
847 847 }
848 848 }
849 849
850 850
851 851 void TemplateTable::astore() {
852 852 transition(vtos, vtos);
853 853 __ pop_ptr(rax, rdx); // will need to pop tag too
854 854 locals_index(rbx);
855 855 __ movptr(aaddress(rbx), rax);
856 856 __ tag_local(rdx, rbx); // need to store same tag in local may be returnAddr
857 857 }
858 858
859 859
860 860 void TemplateTable::wide_istore() {
861 861 transition(vtos, vtos);
862 862 __ pop_i(rax);
863 863 locals_index_wide(rbx);
864 864 __ movl(iaddress(rbx), rax);
865 865 __ tag_local(frame::TagValue, rbx);
866 866 }
867 867
868 868
869 869 void TemplateTable::wide_lstore() {
870 870 transition(vtos, vtos);
871 871 __ pop_l(rax, rdx);
872 872 locals_index_wide(rbx);
873 873 __ movptr(laddress(rbx), rax);
874 874 NOT_LP64(__ movl(haddress(rbx), rdx));
875 875 __ tag_local(frame::TagCategory2, rbx);
876 876 }
877 877
878 878
879 879 void TemplateTable::wide_fstore() {
880 880 wide_istore();
881 881 }
882 882
883 883
884 884 void TemplateTable::wide_dstore() {
885 885 wide_lstore();
886 886 }
887 887
888 888
889 889 void TemplateTable::wide_astore() {
890 890 transition(vtos, vtos);
891 891 __ pop_ptr(rax, rdx);
892 892 locals_index_wide(rbx);
893 893 __ movptr(aaddress(rbx), rax);
894 894 __ tag_local(rdx, rbx);
895 895 }
896 896
897 897
898 898 void TemplateTable::iastore() {
899 899 transition(itos, vtos);
900 900 __ pop_i(rbx);
901 901 // rax,: value
902 902 // rdx: array
903 903 index_check(rdx, rbx); // prefer index in rbx,
904 904 // rbx,: index
905 905 __ movl(Address(rdx, rbx, Address::times_4, arrayOopDesc::base_offset_in_bytes(T_INT)), rax);
906 906 }
907 907
908 908
909 909 void TemplateTable::lastore() {
910 910 transition(ltos, vtos);
911 911 __ pop_i(rbx);
912 912 // rax,: low(value)
913 913 // rcx: array
914 914 // rdx: high(value)
915 915 index_check(rcx, rbx); // prefer index in rbx,
916 916 // rbx,: index
917 917 __ movptr(Address(rcx, rbx, Address::times_8, arrayOopDesc::base_offset_in_bytes(T_LONG) + 0 * wordSize), rax);
918 918 NOT_LP64(__ movl(Address(rcx, rbx, Address::times_8, arrayOopDesc::base_offset_in_bytes(T_LONG) + 1 * wordSize), rdx));
919 919 }
920 920
921 921
922 922 void TemplateTable::fastore() {
923 923 transition(ftos, vtos);
924 924 __ pop_i(rbx);
925 925 // rdx: array
926 926 // st0: value
927 927 index_check(rdx, rbx); // prefer index in rbx,
928 928 // rbx,: index
929 929 __ fstp_s(Address(rdx, rbx, Address::times_4, arrayOopDesc::base_offset_in_bytes(T_FLOAT)));
930 930 }
931 931
932 932
933 933 void TemplateTable::dastore() {
934 934 transition(dtos, vtos);
935 935 __ pop_i(rbx);
936 936 // rdx: array
937 937 // st0: value
938 938 index_check(rdx, rbx); // prefer index in rbx,
939 939 // rbx,: index
940 940 __ fstp_d(Address(rdx, rbx, Address::times_8, arrayOopDesc::base_offset_in_bytes(T_DOUBLE)));
941 941 }
942 942
943 943
944 944 void TemplateTable::aastore() {
945 945 Label is_null, ok_is_subtype, done;
946 946 transition(vtos, vtos);
947 947 // stack: ..., array, index, value
948 948 __ movptr(rax, at_tos()); // Value
949 949 __ movl(rcx, at_tos_p1()); // Index
950 950 __ movptr(rdx, at_tos_p2()); // Array
951 951
952 952 Address element_address(rdx, rcx, Address::times_4, arrayOopDesc::base_offset_in_bytes(T_OBJECT));
953 953 index_check_without_pop(rdx, rcx); // kills rbx,
954 954 // do array store check - check for NULL value first
955 955 __ testptr(rax, rax);
956 956 __ jcc(Assembler::zero, is_null);
957 957
958 958 // Move subklass into EBX
959 959 __ movptr(rbx, Address(rax, oopDesc::klass_offset_in_bytes()));
960 960 // Move superklass into EAX
961 961 __ movptr(rax, Address(rdx, oopDesc::klass_offset_in_bytes()));
962 962 __ movptr(rax, Address(rax, sizeof(oopDesc) + objArrayKlass::element_klass_offset_in_bytes()));
963 963 // Compress array+index*wordSize+12 into a single register. Frees ECX.
964 964 __ lea(rdx, element_address);
965 965
966 966 // Generate subtype check. Blows ECX. Resets EDI to locals.
967 967 // Superklass in EAX. Subklass in EBX.
968 968 __ gen_subtype_check( rbx, ok_is_subtype );
969 969
970 970 // Come here on failure
971 971 // object is at TOS
972 972 __ jump(ExternalAddress(Interpreter::_throw_ArrayStoreException_entry));
973 973
974 974 // Come here on success
975 975 __ bind(ok_is_subtype);
976 976
977 977 // Get the value to store
978 978 __ movptr(rax, at_rsp());
979 979 // and store it with appropriate barrier
980 980 do_oop_store(_masm, Address(rdx, 0), rax, _bs->kind(), true);
981 981
982 982 __ jmp(done);
983 983
984 984 // Have a NULL in EAX, EDX=array, ECX=index. Store NULL at ary[idx]
985 985 __ bind(is_null);
986 986 __ profile_null_seen(rbx);
987 987
988 988 // Store NULL (noreg means NULL to do_oop_store)
989 989 do_oop_store(_masm, element_address, noreg, _bs->kind(), true);
990 990
991 991 // Pop stack arguments
992 992 __ bind(done);
993 993 __ addptr(rsp, 3 * Interpreter::stackElementSize());
994 994 }
995 995
996 996
997 997 void TemplateTable::bastore() {
998 998 transition(itos, vtos);
999 999 __ pop_i(rbx);
1000 1000 // rax,: value
1001 1001 // rdx: array
1002 1002 index_check(rdx, rbx); // prefer index in rbx,
1003 1003 // rbx,: index
1004 1004 __ movb(Address(rdx, rbx, Address::times_1, arrayOopDesc::base_offset_in_bytes(T_BYTE)), rax);
1005 1005 }
1006 1006
1007 1007
1008 1008 void TemplateTable::castore() {
1009 1009 transition(itos, vtos);
1010 1010 __ pop_i(rbx);
1011 1011 // rax,: value
1012 1012 // rdx: array
1013 1013 index_check(rdx, rbx); // prefer index in rbx,
1014 1014 // rbx,: index
1015 1015 __ movw(Address(rdx, rbx, Address::times_2, arrayOopDesc::base_offset_in_bytes(T_CHAR)), rax);
1016 1016 }
1017 1017
1018 1018
1019 1019 void TemplateTable::sastore() {
1020 1020 castore();
1021 1021 }
1022 1022
1023 1023
1024 1024 void TemplateTable::istore(int n) {
1025 1025 transition(itos, vtos);
1026 1026 __ movl(iaddress(n), rax);
1027 1027 __ tag_local(frame::TagValue, n);
1028 1028 }
1029 1029
1030 1030
1031 1031 void TemplateTable::lstore(int n) {
1032 1032 transition(ltos, vtos);
1033 1033 __ movptr(laddress(n), rax);
1034 1034 NOT_LP64(__ movptr(haddress(n), rdx));
1035 1035 __ tag_local(frame::TagCategory2, n);
1036 1036 }
1037 1037
1038 1038
1039 1039 void TemplateTable::fstore(int n) {
1040 1040 transition(ftos, vtos);
1041 1041 __ fstp_s(faddress(n));
1042 1042 __ tag_local(frame::TagValue, n);
1043 1043 }
1044 1044
1045 1045
1046 1046 void TemplateTable::dstore(int n) {
1047 1047 transition(dtos, vtos);
1048 1048 if (TaggedStackInterpreter) {
1049 1049 __ subptr(rsp, 2 * wordSize);
1050 1050 __ fstp_d(Address(rsp, 0));
1051 1051 __ pop(rax);
1052 1052 __ pop(rdx);
1053 1053 __ movl(laddress(n), rax);
1054 1054 __ movl(haddress(n), rdx);
1055 1055 __ tag_local(frame::TagCategory2, n);
1056 1056 } else {
1057 1057 __ fstp_d(daddress(n));
1058 1058 }
1059 1059 }
1060 1060
1061 1061
1062 1062 void TemplateTable::astore(int n) {
1063 1063 transition(vtos, vtos);
1064 1064 __ pop_ptr(rax, rdx);
1065 1065 __ movptr(aaddress(n), rax);
1066 1066 __ tag_local(rdx, n);
1067 1067 }
1068 1068
1069 1069
1070 1070 void TemplateTable::pop() {
1071 1071 transition(vtos, vtos);
1072 1072 __ addptr(rsp, Interpreter::stackElementSize());
1073 1073 }
1074 1074
1075 1075
1076 1076 void TemplateTable::pop2() {
1077 1077 transition(vtos, vtos);
1078 1078 __ addptr(rsp, 2*Interpreter::stackElementSize());
1079 1079 }
1080 1080
1081 1081
1082 1082 void TemplateTable::dup() {
1083 1083 transition(vtos, vtos);
1084 1084 // stack: ..., a
1085 1085 __ load_ptr_and_tag(0, rax, rdx);
1086 1086 __ push_ptr(rax, rdx);
1087 1087 // stack: ..., a, a
1088 1088 }
1089 1089
1090 1090
1091 1091 void TemplateTable::dup_x1() {
1092 1092 transition(vtos, vtos);
1093 1093 // stack: ..., a, b
1094 1094 __ load_ptr_and_tag(0, rax, rdx); // load b
1095 1095 __ load_ptr_and_tag(1, rcx, rbx); // load a
1096 1096 __ store_ptr_and_tag(1, rax, rdx); // store b
1097 1097 __ store_ptr_and_tag(0, rcx, rbx); // store a
1098 1098 __ push_ptr(rax, rdx); // push b
1099 1099 // stack: ..., b, a, b
1100 1100 }
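Reviewer note: the dup/swap templates permute raw, type-agnostic stack slots. dup_x1 above turns ..., a, b into ..., b, a, b; a sketch of the same five operations on a vector standing in for the expression stack (back() is tos):

    #include <cassert>
    #include <vector>

    int main() {
      std::vector<int> stack = { 1, 2 };   // ..., a=1, b=2
      int b = stack[stack.size() - 1];     // load_ptr_and_tag(0, ...) - load b
      int a = stack[stack.size() - 2];     // load_ptr_and_tag(1, ...) - load a
      stack[stack.size() - 2] = b;         // store_ptr_and_tag(1, ...) - store b
      stack[stack.size() - 1] = a;         // store_ptr_and_tag(0, ...) - store a
      stack.push_back(b);                  // push_ptr(...) - push b
      assert((stack == std::vector<int>{ 2, 1, 2 }));  // ..., b, a, b
    }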
1101 1101
1102 1102
1103 1103 void TemplateTable::dup_x2() {
1104 1104 transition(vtos, vtos);
1105 1105 // stack: ..., a, b, c
1106 1106 __ load_ptr_and_tag(0, rax, rdx); // load c
1107 1107 __ load_ptr_and_tag(2, rcx, rbx); // load a
1108 1108 __ store_ptr_and_tag(2, rax, rdx); // store c in a
1109 1109 __ push_ptr(rax, rdx); // push c
1110 1110 // stack: ..., c, b, c, c
1111 1111 __ load_ptr_and_tag(2, rax, rdx); // load b
1112 1112 __ store_ptr_and_tag(2, rcx, rbx); // store a in b
1113 1113 // stack: ..., c, a, c, c
1114 1114 __ store_ptr_and_tag(1, rax, rdx); // store b in c
1115 1115 // stack: ..., c, a, b, c
1116 1116 }
1117 1117
1118 1118
1119 1119 void TemplateTable::dup2() {
1120 1120 transition(vtos, vtos);
1121 1121 // stack: ..., a, b
1122 1122 __ load_ptr_and_tag(1, rax, rdx); // load a
1123 1123 __ push_ptr(rax, rdx); // push a
1124 1124 __ load_ptr_and_tag(1, rax, rdx); // load b
1125 1125 __ push_ptr(rax, rdx); // push b
1126 1126 // stack: ..., a, b, a, b
1127 1127 }
1128 1128
1129 1129
1130 1130 void TemplateTable::dup2_x1() {
1131 1131 transition(vtos, vtos);
1132 1132 // stack: ..., a, b, c
1133 1133 __ load_ptr_and_tag(0, rcx, rbx); // load c
1134 1134 __ load_ptr_and_tag(1, rax, rdx); // load b
1135 1135 __ push_ptr(rax, rdx); // push b
1136 1136 __ push_ptr(rcx, rbx); // push c
1137 1137 // stack: ..., a, b, c, b, c
1138 1138 __ store_ptr_and_tag(3, rcx, rbx); // store c in b
1139 1139 // stack: ..., a, c, c, b, c
1140 1140 __ load_ptr_and_tag(4, rcx, rbx); // load a
1141 1141 __ store_ptr_and_tag(2, rcx, rbx); // store a in 2nd c
1142 1142 // stack: ..., a, c, a, b, c
1143 1143 __ store_ptr_and_tag(4, rax, rdx); // store b in a
1144 1144 // stack: ..., b, c, a, b, c
1145 1145 // stack: ..., b, c, a, b, c
1146 1146 }
1147 1147
1148 1148
1149 1149 void TemplateTable::dup2_x2() {
1150 1150 transition(vtos, vtos);
1151 1151 // stack: ..., a, b, c, d
1152 1152 __ load_ptr_and_tag(0, rcx, rbx); // load d
1153 1153 __ load_ptr_and_tag(1, rax, rdx); // load c
1154 1154 __ push_ptr(rax, rdx); // push c
1155 1155 __ push_ptr(rcx, rbx); // push d
1156 1156 // stack: ..., a, b, c, d, c, d
1157 1157 __ load_ptr_and_tag(4, rax, rdx); // load b
1158 1158 __ store_ptr_and_tag(2, rax, rdx); // store b in d
1159 1159 __ store_ptr_and_tag(4, rcx, rbx); // store d in b
1160 1160 // stack: ..., a, d, c, b, c, d
1161 1161 __ load_ptr_and_tag(5, rcx, rbx); // load a
1162 1162 __ load_ptr_and_tag(3, rax, rdx); // load c
1163 1163 __ store_ptr_and_tag(3, rcx, rbx); // store a in c
1164 1164 __ store_ptr_and_tag(5, rax, rdx); // store c in a
1165 1165 // stack: ..., c, d, a, b, c, d
1166 1166 // stack: ..., c, d, a, b, c, d
1167 1167 }
1168 1168
1169 1169
1170 1170 void TemplateTable::swap() {
1171 1171 transition(vtos, vtos);
1172 1172 // stack: ..., a, b
1173 1173 __ load_ptr_and_tag(1, rcx, rbx); // load a
1174 1174 __ load_ptr_and_tag(0, rax, rdx); // load b
1175 1175 __ store_ptr_and_tag(0, rcx, rbx); // store a in b
1176 1176 __ store_ptr_and_tag(1, rax, rdx); // store b in a
1177 1177 // stack: ..., b, a
1178 1178 }
1179 1179
1180 1180
1181 1181 void TemplateTable::iop2(Operation op) {
1182 1182 transition(itos, itos);
1183 1183 switch (op) {
1184 1184 case add : __ pop_i(rdx); __ addl (rax, rdx); break;
1185 1185 case sub : __ mov(rdx, rax); __ pop_i(rax); __ subl (rax, rdx); break;
1186 1186 case mul : __ pop_i(rdx); __ imull(rax, rdx); break;
1187 1187 case _and : __ pop_i(rdx); __ andl (rax, rdx); break;
1188 1188 case _or : __ pop_i(rdx); __ orl (rax, rdx); break;
1189 1189 case _xor : __ pop_i(rdx); __ xorl (rax, rdx); break;
1190 1190 case shl : __ mov(rcx, rax); __ pop_i(rax); __ shll (rax); break; // implicit masking of lower 5 bits by Intel shift instr.
1191 1191 case shr : __ mov(rcx, rax); __ pop_i(rax); __ sarl (rax); break; // implicit masking of lower 5 bits by Intel shift instr.
1192 1192 case ushr : __ mov(rcx, rax); __ pop_i(rax); __ shrl (rax); break; // implicit masking of lower 5 bits by Intel shift instr.
1193 1193 default : ShouldNotReachHere();
1194 1194 }
1195 1195 }
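Reviewer note: the "implicit masking" comments rely on the x86 shift instructions using only the low 5 bits of CL, which coincides with the JVM's defined semantics for 32-bit shifts, so no explicit andl(rcx, 0x1F) is emitted. The semantics, spelled out:

    #include <cstdint>
    #include <cassert>

    static int32_t  jvm_ishl (int32_t x,  int32_t n) { return x << (n & 0x1F); }
    static int32_t  jvm_ishr (int32_t x,  int32_t n) { return x >> (n & 0x1F); }
    static uint32_t jvm_iushr(uint32_t x, int32_t n) { return x >> (n & 0x1F); }

    int main() {
      assert(jvm_ishl(1, 33) == 2);             // count 33 masks to 1
      assert(jvm_ishr(-8, 1) == -4);            // arithmetic shift keeps the sign
      assert(jvm_iushr(0x80000000u, 31) == 1);  // logical shift does not
    }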
1196 1196
1197 1197
1198 1198 void TemplateTable::lop2(Operation op) {
1199 1199 transition(ltos, ltos);
1200 1200 __ pop_l(rbx, rcx);
1201 1201 switch (op) {
1202 1202 case add : __ addl(rax, rbx); __ adcl(rdx, rcx); break;
1203 1203 case sub : __ subl(rbx, rax); __ sbbl(rcx, rdx);
1204 1204 __ mov(rax, rbx); __ mov(rdx, rcx); break;
1205 1205 case _and: __ andl(rax, rbx); __ andl(rdx, rcx); break;
1206 1206 case _or : __ orl (rax, rbx); __ orl (rdx, rcx); break;
1207 1207 case _xor: __ xorl(rax, rbx); __ xorl(rdx, rcx); break;
1208 1208 default : ShouldNotReachHere();
1209 1209 }
1210 1210 }
1211 1211
1212 1212
1213 1213 void TemplateTable::idiv() {
1214 1214 transition(itos, itos);
1215 1215 __ mov(rcx, rax);
1216 1216 __ pop_i(rax);
1217 1217 // Note: could xor rax and rcx and compare with (-1 ^ min_int). If
1218 1218 // they are not equal, one could do a normal division (no correction
1219 1219 // needed), which may speed up this implementation for the common case.
1220 1220 // (see also JVM spec., p.243 & p.271)
1221 1221 __ corrected_idivl(rcx);
1222 1222 }
1223 1223
1224 1224
1225 1225 void TemplateTable::irem() {
1226 1226 transition(itos, itos);
1227 1227 __ mov(rcx, rax);
1228 1228 __ pop_i(rax);
1229 1229 // Note: could xor rax and rcx and compare with (-1 ^ min_int). If
1230 1230 // they are not equal, one could do a normal division (no correction
1231 1231 // needed), which may speed up this implementation for the common case.
1232 1232 // (see also JVM spec., p.243 & p.271)
1233 1233 __ corrected_idivl(rcx);
1234 1234 __ mov(rax, rdx);
1235 1235 }
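Reviewer note: corrected_idivl() exists because x86 idiv raises #DE on min_int / -1, whereas the JVM defines that quotient as min_int (with remainder 0). A sketch of the special case the stub handles before dividing:

    #include <cstdint>
    #include <cassert>

    static int32_t jvm_idiv(int32_t x, int32_t y) {
      if (x == INT32_MIN && y == -1) return INT32_MIN;  // raw idiv would trap here
      return x / y;
    }
    static int32_t jvm_irem(int32_t x, int32_t y) {
      if (x == INT32_MIN && y == -1) return 0;
      return x % y;
    }

    int main() {
      assert(jvm_idiv(INT32_MIN, -1) == INT32_MIN);
      assert(jvm_irem(INT32_MIN, -1) == 0);
    }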
1236 1236
1237 1237
1238 1238 void TemplateTable::lmul() {
1239 1239 transition(ltos, ltos);
1240 1240 __ pop_l(rbx, rcx);
1241 1241 __ push(rcx); __ push(rbx);
1242 1242 __ push(rdx); __ push(rax);
1243 1243 __ lmul(2 * wordSize, 0);
1244 1244 __ addptr(rsp, 4 * wordSize); // take off temporaries
1245 1245 }
1246 1246
1247 1247
1248 1248 void TemplateTable::ldiv() {
1249 1249 transition(ltos, ltos);
1250 1250 __ pop_l(rbx, rcx);
1251 1251 __ push(rcx); __ push(rbx);
1252 1252 __ push(rdx); __ push(rax);
1253 1253 // check if y = 0
1254 1254 __ orl(rax, rdx);
1255 1255 __ jump_cc(Assembler::zero,
1256 1256 ExternalAddress(Interpreter::_throw_ArithmeticException_entry));
1257 1257 __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::ldiv));
1258 1258 __ addptr(rsp, 4 * wordSize); // take off temporaries
1259 1259 }
1260 1260
1261 1261
1262 1262 void TemplateTable::lrem() {
1263 1263 transition(ltos, ltos);
1264 1264 __ pop_l(rbx, rcx);
1265 1265 __ push(rcx); __ push(rbx);
1266 1266 __ push(rdx); __ push(rax);
1267 1267 // check if y = 0
1268 1268 __ orl(rax, rdx);
1269 1269 __ jump_cc(Assembler::zero,
1270 1270 ExternalAddress(Interpreter::_throw_ArithmeticException_entry));
1271 1271 __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::lrem));
1272 1272 __ addptr(rsp, 4 * wordSize);
1273 1273 }
1274 1274
1275 1275
1276 1276 void TemplateTable::lshl() {
1277 1277 transition(itos, ltos);
1278 1278 __ movl(rcx, rax); // get shift count
1279 1279 __ pop_l(rax, rdx); // get shift value
1280 1280 __ lshl(rdx, rax);
1281 1281 }
1282 1282
1283 1283
1284 1284 void TemplateTable::lshr() {
1285 1285 transition(itos, ltos);
1286 1286 __ mov(rcx, rax); // get shift count
1287 1287 __ pop_l(rax, rdx); // get shift value
1288 1288 __ lshr(rdx, rax, true);
1289 1289 }
1290 1290
1291 1291
1292 1292 void TemplateTable::lushr() {
1293 1293 transition(itos, ltos);
1294 1294 __ mov(rcx, rax); // get shift count
1295 1295 __ pop_l(rax, rdx); // get shift value
1296 1296 __ lshr(rdx, rax);
1297 1297 }
1298 1298
1299 1299
1300 1300 void TemplateTable::fop2(Operation op) {
1301 1301 transition(ftos, ftos);
1302 1302 __ pop_ftos_to_rsp(); // pop ftos into rsp
1303 1303 switch (op) {
1304 1304 case add: __ fadd_s (at_rsp()); break;
1305 1305 case sub: __ fsubr_s(at_rsp()); break;
1306 1306 case mul: __ fmul_s (at_rsp()); break;
1307 1307 case div: __ fdivr_s(at_rsp()); break;
1308 1308 case rem: __ fld_s (at_rsp()); __ fremr(rax); break;
1309 1309 default : ShouldNotReachHere();
1310 1310 }
1311 1311 __ f2ieee();
1312 1312 __ pop(rax); // pop float thing off
1313 1313 }
1314 1314
1315 1315
1316 1316 void TemplateTable::dop2(Operation op) {
1317 1317 transition(dtos, dtos);
1318 1318 __ pop_dtos_to_rsp(); // pop dtos into rsp
1319 1319
1320 1320 switch (op) {
1321 1321 case add: __ fadd_d (at_rsp()); break;
1322 1322 case sub: __ fsubr_d(at_rsp()); break;
1323 1323 case mul: {
1324 1324 Label L_strict;
1325 1325 Label L_join;
1326 1326 const Address access_flags (rcx, methodOopDesc::access_flags_offset());
1327 1327 __ get_method(rcx);
1328 1328 __ movl(rcx, access_flags);
1329 1329 __ testl(rcx, JVM_ACC_STRICT);
1330 1330 __ jccb(Assembler::notZero, L_strict);
1331 1331 __ fmul_d (at_rsp());
1332 1332 __ jmpb(L_join);
1333 1333 __ bind(L_strict);
1334 1334 __ fld_x(ExternalAddress(StubRoutines::addr_fpu_subnormal_bias1()));
1335 1335 __ fmulp();
1336 1336 __ fmul_d (at_rsp());
1337 1337 __ fld_x(ExternalAddress(StubRoutines::addr_fpu_subnormal_bias2()));
1338 1338 __ fmulp();
1339 1339 __ bind(L_join);
1340 1340 break;
1341 1341 }
1342 1342 case div: {
1343 1343 Label L_strict;
1344 1344 Label L_join;
1345 1345 const Address access_flags (rcx, methodOopDesc::access_flags_offset());
1346 1346 __ get_method(rcx);
1347 1347 __ movl(rcx, access_flags);
1348 1348 __ testl(rcx, JVM_ACC_STRICT);
1349 1349 __ jccb(Assembler::notZero, L_strict);
1350 1350 __ fdivr_d(at_rsp());
1351 1351 __ jmp(L_join);
1352 1352 __ bind(L_strict);
1353 1353 __ fld_x(ExternalAddress(StubRoutines::addr_fpu_subnormal_bias1()));
1354 1354 __ fmul_d (at_rsp());
1355 1355 __ fdivrp();
1356 1356 __ fld_x(ExternalAddress(StubRoutines::addr_fpu_subnormal_bias2()));
1357 1357 __ fmulp();
1358 1358 __ bind(L_join);
1359 1359 break;
1360 1360 }
1361 1361 case rem: __ fld_d (at_rsp()); __ fremr(rax); break;
1362 1362 default : ShouldNotReachHere();
1363 1363 }
1364 1364 __ d2ieee();
1365 1365 // Pop double precision number from rsp.
1366 1366 __ pop(rax);
1367 1367 __ pop(rdx);
1368 1368 }
1369 1369
1370 1370
1371 1371 void TemplateTable::ineg() {
1372 1372 transition(itos, itos);
1373 1373 __ negl(rax);
1374 1374 }
1375 1375
1376 1376
1377 1377 void TemplateTable::lneg() {
1378 1378 transition(ltos, ltos);
1379 1379 __ lneg(rdx, rax);
1380 1380 }
1381 1381
1382 1382
1383 1383 void TemplateTable::fneg() {
1384 1384 transition(ftos, ftos);
1385 1385 __ fchs();
1386 1386 }
1387 1387
1388 1388
1389 1389 void TemplateTable::dneg() {
1390 1390 transition(dtos, dtos);
1391 1391 __ fchs();
1392 1392 }
1393 1393
1394 1394
1395 1395 void TemplateTable::iinc() {
1396 1396 transition(vtos, vtos);
1397 1397 __ load_signed_byte(rdx, at_bcp(2)); // get constant
1398 1398 locals_index(rbx);
1399 1399 __ addl(iaddress(rbx), rdx);
1400 1400 }
1401 1401
1402 1402
1403 1403 void TemplateTable::wide_iinc() {
1404 1404 transition(vtos, vtos);
1405 1405 __ movl(rdx, at_bcp(4)); // get constant
1406 1406 locals_index_wide(rbx);
1407 1407 __ bswapl(rdx); // swap bytes & sign-extend constant
1408 1408 __ sarl(rdx, 16);
1409 1409 __ addl(iaddress(rbx), rdx);
1410 1410 // Note: should probably use only one movl to get both
1411 1411 // the index and the constant -> fix this
1412 1412 }
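Reviewer note: re the TODO above - the four operand bytes at bcp+2 are laid out [index_hi][index_lo][const_hi][const_lo], so one 4-byte load could indeed deliver both operands. A hedged sketch of that decoding (example bytes only):

    #include <cstdint>
    #include <cassert>

    static void decode_wide_iinc(const uint8_t* p, uint16_t& index, int16_t& c) {
      index = (uint16_t)(p[0] << 8 | p[1]);   // big-endian u2 local index
      c     = (int16_t) (p[2] << 8 | p[3]);   // big-endian s2 increment
    }

    int main() {
      const uint8_t operands[] = { 0x01, 0x02, 0xFF, 0xFE };
      uint16_t idx; int16_t c;
      decode_wide_iinc(operands, idx, c);
      assert(idx == 0x0102 && c == -2);
    }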
1413 1413
1414 1414
1415 1415 void TemplateTable::convert() {
1416 1416 // Checking
1417 1417 #ifdef ASSERT
1418 1418 { TosState tos_in = ilgl;
1419 1419 TosState tos_out = ilgl;
1420 1420 switch (bytecode()) {
1421 1421 case Bytecodes::_i2l: // fall through
1422 1422 case Bytecodes::_i2f: // fall through
1423 1423 case Bytecodes::_i2d: // fall through
1424 1424 case Bytecodes::_i2b: // fall through
1425 1425 case Bytecodes::_i2c: // fall through
1426 1426 case Bytecodes::_i2s: tos_in = itos; break;
1427 1427 case Bytecodes::_l2i: // fall through
1428 1428 case Bytecodes::_l2f: // fall through
1429 1429 case Bytecodes::_l2d: tos_in = ltos; break;
1430 1430 case Bytecodes::_f2i: // fall through
1431 1431 case Bytecodes::_f2l: // fall through
1432 1432 case Bytecodes::_f2d: tos_in = ftos; break;
1433 1433 case Bytecodes::_d2i: // fall through
1434 1434 case Bytecodes::_d2l: // fall through
1435 1435 case Bytecodes::_d2f: tos_in = dtos; break;
1436 1436 default : ShouldNotReachHere();
1437 1437 }
1438 1438 switch (bytecode()) {
1439 1439 case Bytecodes::_l2i: // fall through
1440 1440 case Bytecodes::_f2i: // fall through
1441 1441 case Bytecodes::_d2i: // fall through
1442 1442 case Bytecodes::_i2b: // fall through
1443 1443 case Bytecodes::_i2c: // fall through
1444 1444 case Bytecodes::_i2s: tos_out = itos; break;
1445 1445 case Bytecodes::_i2l: // fall through
1446 1446 case Bytecodes::_f2l: // fall through
1447 1447 case Bytecodes::_d2l: tos_out = ltos; break;
1448 1448 case Bytecodes::_i2f: // fall through
1449 1449 case Bytecodes::_l2f: // fall through
1450 1450 case Bytecodes::_d2f: tos_out = ftos; break;
1451 1451 case Bytecodes::_i2d: // fall through
1452 1452 case Bytecodes::_l2d: // fall through
1453 1453 case Bytecodes::_f2d: tos_out = dtos; break;
1454 1454 default : ShouldNotReachHere();
1455 1455 }
1456 1456 transition(tos_in, tos_out);
1457 1457 }
1458 1458 #endif // ASSERT
1459 1459
1460 1460 // Conversion
1461 1461 // (Note: use push(rcx)/pop(rcx) for 1/2-word stack-ptr manipulation)
1462 1462 switch (bytecode()) {
1463 1463 case Bytecodes::_i2l:
1464 1464 __ extend_sign(rdx, rax);
1465 1465 break;
1466 1466 case Bytecodes::_i2f:
1467 1467 __ push(rax); // store int on tos
1468 1468 __ fild_s(at_rsp()); // load int to ST0
1469 1469 __ f2ieee(); // truncate to float size
1470 1470 __ pop(rcx); // adjust rsp
1471 1471 break;
1472 1472 case Bytecodes::_i2d:
1473 1473 __ push(rax); // add one slot for d2ieee()
1474 1474 __ push(rax); // store int on tos
1475 1475 __ fild_s(at_rsp()); // load int to ST0
1476 1476 __ d2ieee(); // truncate to double size
1477 1477 __ pop(rcx); // adjust rsp
1478 1478 __ pop(rcx);
1479 1479 break;
1480 1480 case Bytecodes::_i2b:
1481 1481 __ shll(rax, 24); // truncate upper 24 bits
1482 1482 __ sarl(rax, 24); // and sign-extend byte
1483 1483 LP64_ONLY(__ movsbl(rax, rax));
1484 1484 break;
1485 1485 case Bytecodes::_i2c:
1486 1486 __ andl(rax, 0xFFFF); // truncate upper 16 bits
1487 1487 LP64_ONLY(__ movzwl(rax, rax));
1488 1488 break;
1489 1489 case Bytecodes::_i2s:
1490 1490 __ shll(rax, 16); // truncate upper 16 bits
1491 1491 __ sarl(rax, 16); // and sign-extend short
1492 1492 LP64_ONLY(__ movswl(rax, rax));
1493 1493 break;
1494 1494 case Bytecodes::_l2i:
1495 1495 /* nothing to do */
1496 1496 break;
1497 1497 case Bytecodes::_l2f:
1498 1498 __ push(rdx); // store long on tos
1499 1499 __ push(rax);
1500 1500 __ fild_d(at_rsp()); // load long to ST0
1501 1501 __ f2ieee(); // truncate to float size
1502 1502 __ pop(rcx); // adjust rsp
1503 1503 __ pop(rcx);
1504 1504 break;
1505 1505 case Bytecodes::_l2d:
1506 1506 __ push(rdx); // store long on tos
1507 1507 __ push(rax);
1508 1508 __ fild_d(at_rsp()); // load long to ST0
1509 1509 __ d2ieee(); // truncate to double size
1510 1510 __ pop(rcx); // adjust rsp
1511 1511 __ pop(rcx);
1512 1512 break;
1513 1513 case Bytecodes::_f2i:
1514 1514 __ push(rcx); // reserve space for argument
1515 1515 __ fstp_s(at_rsp()); // pass float argument on stack
1516 1516 __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::f2i), 1);
1517 1517 break;
1518 1518 case Bytecodes::_f2l:
1519 1519 __ push(rcx); // reserve space for argument
1520 1520 __ fstp_s(at_rsp()); // pass float argument on stack
1521 1521 __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::f2l), 1);
1522 1522 break;
1523 1523 case Bytecodes::_f2d:
1524 1524 /* nothing to do */
1525 1525 break;
1526 1526 case Bytecodes::_d2i:
1527 1527 __ push(rcx); // reserve space for argument
1528 1528 __ push(rcx);
1529 1529 __ fstp_d(at_rsp()); // pass double argument on stack
1530 1530 __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::d2i), 2);
1531 1531 break;
1532 1532 case Bytecodes::_d2l:
1533 1533 __ push(rcx); // reserve space for argument
1534 1534 __ push(rcx);
1535 1535 __ fstp_d(at_rsp()); // pass double argument on stack
1536 1536 __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::d2l), 2);
1537 1537 break;
1538 1538 case Bytecodes::_d2f:
1539 1539 __ push(rcx); // reserve space for f2ieee()
1540 1540 __ f2ieee(); // truncate to float size
1541 1541 __ pop(rcx); // adjust rsp
1542 1542 break;
1543 1543 default :
1544 1544 ShouldNotReachHere();
1545 1545 }
1546 1546 }
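Reviewer note: the i2b/i2c/i2s cases narrow with shll/sarl (or andl) pairs rather than movsx/movzx on the 32-bit side. The values they compute, in portable form:

    #include <cstdint>
    #include <cassert>

    static int32_t i2b(int32_t x) { return (int8_t)  x; }  // shll 24 / sarl 24
    static int32_t i2c(int32_t x) { return (uint16_t)x; }  // andl 0xFFFF
    static int32_t i2s(int32_t x) { return (int16_t) x; }  // shll 16 / sarl 16

    int main() {
      assert(i2b(0x1FF)   == -1);      // low byte 0xFF sign-extends
      assert(i2c(-1)      == 0xFFFF);  // low short zero-extends
      assert(i2s(0x18000) == -32768);  // low short 0x8000 sign-extends
    }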
1547 1547
1548 1548
1549 1549 void TemplateTable::lcmp() {
1550 1550 transition(ltos, itos);
1551 1551 // y = rdx:rax
1552 1552 __ pop_l(rbx, rcx); // get x = rcx:rbx
1553 1553 __ lcmp2int(rcx, rbx, rdx, rax);// rcx := cmp(x, y)
1554 1554 __ mov(rax, rcx);
1555 1555 }
1556 1556
1557 1557
1558 1558 void TemplateTable::float_cmp(bool is_float, int unordered_result) {
1559 1559 if (is_float) {
1560 1560 __ pop_ftos_to_rsp();
1561 1561 __ fld_s(at_rsp());
1562 1562 } else {
1563 1563 __ pop_dtos_to_rsp();
1564 1564 __ fld_d(at_rsp());
1565 1565 __ pop(rdx);
1566 1566 }
1567 1567 __ pop(rcx);
1568 1568 __ fcmp2int(rax, unordered_result < 0);
1569 1569 }
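Reviewer note: fcmp2int() collapses the FPU compare into -1/0/+1 in rax, and unordered_result picks the NaN answer: -1 for fcmpl/dcmpl, +1 for fcmpg/dcmpg. Equivalent logic:

    #include <cmath>
    #include <cassert>

    static int float_cmp(double x, double y, int unordered_result) {
      if (std::isnan(x) || std::isnan(y)) return unordered_result;
      return (x > y) - (x < y);                // -1, 0, or +1
    }

    int main() {
      assert(float_cmp(1.0, 2.0, -1) == -1);
      assert(float_cmp(NAN, 0.0, -1) == -1);   // fcmpl / dcmpl
      assert(float_cmp(NAN, 0.0,  1) ==  1);   // fcmpg / dcmpg
    }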
1570 1570
1571 1571
1572 1572 void TemplateTable::branch(bool is_jsr, bool is_wide) {
1573 1573 __ get_method(rcx); // ECX holds method
1574 1574 __ profile_taken_branch(rax,rbx); // EAX holds updated MDP, EBX holds bumped taken count
1575 1575
1576 1576 const ByteSize be_offset = methodOopDesc::backedge_counter_offset() + InvocationCounter::counter_offset();
1577 1577 const ByteSize inv_offset = methodOopDesc::invocation_counter_offset() + InvocationCounter::counter_offset();
1578 1578 const int method_offset = frame::interpreter_frame_method_offset * wordSize;
1579 1579
1580 1580 // Load up EDX with the branch displacement
1581 1581 __ movl(rdx, at_bcp(1));
1582 1582 __ bswapl(rdx);
1583 1583 if (!is_wide) __ sarl(rdx, 16);
1584 1584 LP64_ONLY(__ movslq(rdx, rdx));
1585 1585
1586 1586
1587 1587 // Handle all the JSR stuff here, then exit.
1588 1588 // It's much shorter and cleaner than intermingling with the
1589 1589 // non-JSR normal-branch stuff occurring below.
1590 1590 if (is_jsr) {
1591 1591 // Pre-load the next target bytecode into EBX
1592 1592 __ load_unsigned_byte(rbx, Address(rsi, rdx, Address::times_1, 0));
1593 1593
1594 1594 // compute return address as bci in rax,
1595 1595 __ lea(rax, at_bcp((is_wide ? 5 : 3) - in_bytes(constMethodOopDesc::codes_offset())));
1596 1596 __ subptr(rax, Address(rcx, methodOopDesc::const_offset()));
1597 1597 // Adjust the bcp in RSI by the displacement in EDX
1598 1598 __ addptr(rsi, rdx);
1599 1599 // Push return address
1600 1600 __ push_i(rax);
1601 1601 // jsr returns vtos
1602 1602 __ dispatch_only_noverify(vtos);
1603 1603 return;
1604 1604 }
1605 1605
1606 1606 // Normal (non-jsr) branch handling
1607 1607
1608 1608 // Adjust the bcp in RSI by the displacement in EDX
1609 1609 __ addptr(rsi, rdx);
1610 1610
1611 1611 assert(UseLoopCounter || !UseOnStackReplacement, "on-stack-replacement requires loop counters");
1612 1612 Label backedge_counter_overflow;
1613 1613 Label profile_method;
1614 1614 Label dispatch;
1615 1615 if (UseLoopCounter) {
1616 1616 // increment backedge counter for backward branches
1617 1617 // rax,: MDO
1618 1618 // rbx,: MDO bumped taken-count
1619 1619 // rcx: method
1620 1620 // rdx: target offset
1621 1621 // rsi: target bcp
1622 1622 // rdi: locals pointer
1623 1623 __ testl(rdx, rdx); // check if forward or backward branch
1624 1624 __ jcc(Assembler::positive, dispatch); // count only if backward branch
1625 1625
1626 1626 // increment counter
1627 1627 __ movl(rax, Address(rcx, be_offset)); // load backedge counter
1628 1628 __ incrementl(rax, InvocationCounter::count_increment); // increment counter
1629 1629 __ movl(Address(rcx, be_offset), rax); // store counter
1630 1630
1631 1631 __ movl(rax, Address(rcx, inv_offset)); // load invocation counter
1632 1632 __ andl(rax, InvocationCounter::count_mask_value); // and the status bits
1633 1633 __ addl(rax, Address(rcx, be_offset)); // add both counters
1634 1634
1635 1635 if (ProfileInterpreter) {
1636 1636 // Test to see if we should create a method data oop
1637 1637 __ cmp32(rax,
1638 1638 ExternalAddress((address) &InvocationCounter::InterpreterProfileLimit));
1639 1639 __ jcc(Assembler::less, dispatch);
1640 1640
1641 1641 // if no method data exists, go to profile method
1642 1642 __ test_method_data_pointer(rax, profile_method);
1643 1643
1644 1644 if (UseOnStackReplacement) {
1645 1645 // check for overflow against rbx, which is the MDO taken count
1646 1646 __ cmp32(rbx,
1647 1647 ExternalAddress((address) &InvocationCounter::InterpreterBackwardBranchLimit));
1648 1648 __ jcc(Assembler::below, dispatch);
1649 1649
1650 1650 // When ProfileInterpreter is on, the backedge_count comes from the
1651 1651     // methodDataOop, whose value does not get reset on the call to
1652 1652 // frequency_counter_overflow(). To avoid excessive calls to the overflow
1653 1653 // routine while the method is being compiled, add a second test to make
1654 1654 // sure the overflow function is called only once every overflow_frequency.
1655 1655 const int overflow_frequency = 1024;
1656 1656 __ andptr(rbx, overflow_frequency-1);
1657 1657 __ jcc(Assembler::zero, backedge_counter_overflow);
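// In effect (illustrative): overflow_frequency is a power of two, so the
// andptr/jcc pair above implements
//   if ((mdo_taken_count % overflow_frequency) == 0) goto backedge_counter_overflow;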
1658 1658
1659 1659 }
1660 1660 } else {
1661 1661 if (UseOnStackReplacement) {
1662 1662 // check for overflow against rax, which is the sum of the counters
1663 1663 __ cmp32(rax,
1664 1664 ExternalAddress((address) &InvocationCounter::InterpreterBackwardBranchLimit));
1665 1665 __ jcc(Assembler::aboveEqual, backedge_counter_overflow);
1666 1666
1667 1667 }
1668 1668 }
1669 1669 __ bind(dispatch);
1670 1670 }
1671 1671
1672 1672 // Pre-load the next target bytecode into EBX
1673 1673 __ load_unsigned_byte(rbx, Address(rsi, 0));
1674 1674
1675 1675 // continue with the bytecode @ target
1676 1676 // rax,: return bci for jsr's, unused otherwise
1677 1677 // rbx,: target bytecode
1678 1678 // rsi: target bcp
1679 1679 __ dispatch_only(vtos);
1680 1680
1681 1681 if (UseLoopCounter) {
1682 1682 if (ProfileInterpreter) {
1683 1683 // Out-of-line code to allocate method data oop.
1684 1684 __ bind(profile_method);
1685 1685 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::profile_method), rsi);
1686 1686 __ load_unsigned_byte(rbx, Address(rsi, 0)); // restore target bytecode
1687 1687 __ movptr(rcx, Address(rbp, method_offset));
1688 1688 __ movptr(rcx, Address(rcx, in_bytes(methodOopDesc::method_data_offset())));
1689 1689 __ movptr(Address(rbp, frame::interpreter_frame_mdx_offset * wordSize), rcx);
1690 1690 __ test_method_data_pointer(rcx, dispatch);
1691 1691 // offset non-null mdp by MDO::data_offset() + IR::profile_method()
1692 1692 __ addptr(rcx, in_bytes(methodDataOopDesc::data_offset()));
1693 1693 __ addptr(rcx, rax);
1694 1694 __ movptr(Address(rbp, frame::interpreter_frame_mdx_offset * wordSize), rcx);
1695 1695 __ jmp(dispatch);
1696 1696 }
1697 1697
1698 1698 if (UseOnStackReplacement) {
1699 1699
1700 1700 // invocation counter overflow
1701 1701 __ bind(backedge_counter_overflow);
1702 1702 __ negptr(rdx);
1703 1703 __ addptr(rdx, rsi); // branch bcp
1704 1704 call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::frequency_counter_overflow), rdx);
1705 1705 __ load_unsigned_byte(rbx, Address(rsi, 0)); // restore target bytecode
1706 1706
1707 1707 // rax,: osr nmethod (osr ok) or NULL (osr not possible)
1708 1708 // rbx,: target bytecode
1709 1709 // rdx: scratch
1710 1710 // rdi: locals pointer
1711 1711 // rsi: bcp
1712 1712 __ testptr(rax, rax); // test result
1713 1713 __ jcc(Assembler::zero, dispatch); // no osr if null
1714 1714 // nmethod may have been invalidated (VM may block upon call_VM return)
1715 1715 __ movl(rcx, Address(rax, nmethod::entry_bci_offset()));
1716 1716 __ cmpl(rcx, InvalidOSREntryBci);
1717 1717 __ jcc(Assembler::equal, dispatch);
1718 1718
1719 1719 // We have the address of an on stack replacement routine in rax,
1720 1720 // We need to prepare to execute the OSR method. First we must
1721 1721 // migrate the locals and monitors off of the stack.
1722 1722
1723 1723 __ mov(rbx, rax); // save the nmethod
1724 1724
1725 1725 const Register thread = rcx;
1726 1726 __ get_thread(thread);
1727 1727 call_VM(noreg, CAST_FROM_FN_PTR(address, SharedRuntime::OSR_migration_begin));
1728 1728 // rax, is OSR buffer, move it to expected parameter location
1729 1729 __ mov(rcx, rax);
1730 1730
1731 1731 // pop the interpreter frame
1732 1732 __ movptr(rdx, Address(rbp, frame::interpreter_frame_sender_sp_offset * wordSize)); // get sender sp
1733 1733 __ leave(); // remove frame anchor
1734 1734 __ pop(rdi); // get return address
1735 1735 __ mov(rsp, rdx); // set sp to sender sp
1736 1736
1737 1737
1738 1738 Label skip;
1739 1739 Label chkint;
1740 1740
1741 1741 // The interpreter frame we have removed may be returning to
1742 1742 // either the callstub or the interpreter. Since we will
1743 1743 // now be returning from a compiled (OSR) nmethod we must
1744 1744       // adjust the return address to one where it can handle compiled
1745 1745       // results and clean the fpu stack. This is very similar to
1746 1746       // what an i2c adapter must do.
1747 1747
1748 1748 // Are we returning to the call stub?
1749 1749
1750 1750 __ cmp32(rdi, ExternalAddress(StubRoutines::_call_stub_return_address));
1751 1751 __ jcc(Assembler::notEqual, chkint);
1752 1752
1753 1753       // Yes: adjust to the specialized call stub return.
1754 1754 assert(StubRoutines::x86::get_call_stub_compiled_return() != NULL, "must be set");
1755 1755 __ lea(rdi, ExternalAddress(StubRoutines::x86::get_call_stub_compiled_return()));
1756 1756 __ jmp(skip);
1757 1757
1758 1758 __ bind(chkint);
1759 1759
1760 1760 // Are we returning to the interpreter? Look for sentinel
1761 1761
1762 1762 __ cmpl(Address(rdi, -2*wordSize), Interpreter::return_sentinel);
1763 1763 __ jcc(Assembler::notEqual, skip);
1764 1764
1765 1765 // Adjust to compiled return back to interpreter
1766 1766
1767 1767 __ movptr(rdi, Address(rdi, -wordSize));
1768 1768 __ bind(skip);
1769 1769
1770 1770 // Align stack pointer for compiled code (note that caller is
1771 1771 // responsible for undoing this fixup by remembering the old SP
1772 1772 // in an rbp,-relative location)
1773 1773 __ andptr(rsp, -(StackAlignmentInBytes));
1774 1774
1775 1775 // push the (possibly adjusted) return address
1776 1776 __ push(rdi);
1777 1777
1778 1778 // and begin the OSR nmethod
1779 1779 __ jmp(Address(rbx, nmethod::osr_entry_point_offset()));
1780 1780 }
1781 1781 }
1782 1782 }
1783 1783
1784 1784
1785 1785 void TemplateTable::if_0cmp(Condition cc) {
1786 1786 transition(itos, vtos);
1787 1787 // assume branch is more often taken than not (loops use backward branches)
1788 1788 Label not_taken;
1789 1789 __ testl(rax, rax);
1790 1790 __ jcc(j_not(cc), not_taken);
1791 1791 branch(false, false);
1792 1792 __ bind(not_taken);
1793 1793 __ profile_not_taken_branch(rax);
1794 1794 }
1795 1795
1796 1796
1797 1797 void TemplateTable::if_icmp(Condition cc) {
1798 1798 transition(itos, vtos);
1799 1799 // assume branch is more often taken than not (loops use backward branches)
1800 1800 Label not_taken;
1801 1801 __ pop_i(rdx);
1802 1802 __ cmpl(rdx, rax);
1803 1803 __ jcc(j_not(cc), not_taken);
1804 1804 branch(false, false);
1805 1805 __ bind(not_taken);
1806 1806 __ profile_not_taken_branch(rax);
1807 1807 }
1808 1808
1809 1809
1810 1810 void TemplateTable::if_nullcmp(Condition cc) {
1811 1811 transition(atos, vtos);
1812 1812 // assume branch is more often taken than not (loops use backward branches)
1813 1813 Label not_taken;
1814 1814 __ testptr(rax, rax);
1815 1815 __ jcc(j_not(cc), not_taken);
1816 1816 branch(false, false);
1817 1817 __ bind(not_taken);
1818 1818 __ profile_not_taken_branch(rax);
1819 1819 }
1820 1820
1821 1821
1822 1822 void TemplateTable::if_acmp(Condition cc) {
1823 1823 transition(atos, vtos);
1824 1824 // assume branch is more often taken than not (loops use backward branches)
1825 1825 Label not_taken;
1826 1826 __ pop_ptr(rdx);
1827 1827 __ cmpptr(rdx, rax);
1828 1828 __ jcc(j_not(cc), not_taken);
1829 1829 branch(false, false);
1830 1830 __ bind(not_taken);
1831 1831 __ profile_not_taken_branch(rax);
1832 1832 }
1833 1833
1834 1834
1835 1835 void TemplateTable::ret() {
1836 1836 transition(vtos, vtos);
1837 1837 locals_index(rbx);
1838 1838 __ movptr(rbx, iaddress(rbx)); // get return bci, compute return bcp
1839 1839 __ profile_ret(rbx, rcx);
1840 1840 __ get_method(rax);
1841 1841 __ movptr(rsi, Address(rax, methodOopDesc::const_offset()));
1842 1842 __ lea(rsi, Address(rsi, rbx, Address::times_1,
1843 1843 constMethodOopDesc::codes_offset()));
1844 1844 __ dispatch_next(vtos);
1845 1845 }
1846 1846
1847 1847
1848 1848 void TemplateTable::wide_ret() {
1849 1849 transition(vtos, vtos);
1850 1850 locals_index_wide(rbx);
1851 1851 __ movptr(rbx, iaddress(rbx)); // get return bci, compute return bcp
1852 1852 __ profile_ret(rbx, rcx);
1853 1853 __ get_method(rax);
1854 1854 __ movptr(rsi, Address(rax, methodOopDesc::const_offset()));
1855 1855 __ lea(rsi, Address(rsi, rbx, Address::times_1, constMethodOopDesc::codes_offset()));
1856 1856 __ dispatch_next(vtos);
1857 1857 }
1858 1858
1859 1859
1860 1860 void TemplateTable::tableswitch() {
1861 1861 Label default_case, continue_execution;
1862 1862 transition(itos, vtos);
1863 1863 // align rsi
1864 1864 __ lea(rbx, at_bcp(wordSize));
1865 1865 __ andptr(rbx, -wordSize);
1866 1866 // load lo & hi
1867 1867 __ movl(rcx, Address(rbx, 1 * wordSize));
1868 1868 __ movl(rdx, Address(rbx, 2 * wordSize));
1869 1869 __ bswapl(rcx);
1870 1870 __ bswapl(rdx);
1871 1871 // check against lo & hi
1872 1872 __ cmpl(rax, rcx);
1873 1873 __ jccb(Assembler::less, default_case);
1874 1874 __ cmpl(rax, rdx);
1875 1875 __ jccb(Assembler::greater, default_case);
1876 1876 // lookup dispatch offset
1877 1877 __ subl(rax, rcx);
1878 1878 __ movl(rdx, Address(rbx, rax, Address::times_4, 3 * BytesPerInt));
1879 1879 __ profile_switch_case(rax, rbx, rcx);
1880 1880 // continue execution
1881 1881 __ bind(continue_execution);
1882 1882 __ bswapl(rdx);
1883 1883 __ load_unsigned_byte(rbx, Address(rsi, rdx, Address::times_1));
1884 1884 __ addptr(rsi, rdx);
1885 1885 __ dispatch_only(vtos);
1886 1886 // handle default
1887 1887 __ bind(default_case);
1888 1888 __ profile_switch_default(rax);
1889 1889 __ movl(rdx, Address(rbx, 0));
1890 1890 __ jmp(continue_execution);
1891 1891 }
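// For reference (JVM spec layout, not a new code path): after the opcode,
// tableswitch is padded to a 4-byte boundary and laid out as big-endian
// 32-bit words:
//   default, lo, hi, offset[0] .. offset[hi-lo]
// which is why the code above aligns rbx, loads lo/hi at +1/+2 words, and
// loads the selected offset at 3*BytesPerInt + 4*(key-lo).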
1892 1892
1893 1893
1894 1894 void TemplateTable::lookupswitch() {
1895 1895 transition(itos, itos);
1896 1896 __ stop("lookupswitch bytecode should have been rewritten");
1897 1897 }
1898 1898
1899 1899
1900 1900 void TemplateTable::fast_linearswitch() {
1901 1901 transition(itos, vtos);
1902 1902 Label loop_entry, loop, found, continue_execution;
1903 1903 // bswapl rax, so we can avoid bswapping the table entries
1904 1904 __ bswapl(rax);
1905 1905 // align rsi
1906 1906 __ lea(rbx, at_bcp(wordSize)); // btw: should be able to get rid of this instruction (change offsets below)
1907 1907 __ andptr(rbx, -wordSize);
1908 1908 // set counter
1909 1909 __ movl(rcx, Address(rbx, wordSize));
1910 1910 __ bswapl(rcx);
1911 1911 __ jmpb(loop_entry);
1912 1912 // table search
1913 1913 __ bind(loop);
1914 1914 __ cmpl(rax, Address(rbx, rcx, Address::times_8, 2 * wordSize));
1915 1915 __ jccb(Assembler::equal, found);
1916 1916 __ bind(loop_entry);
1917 1917 __ decrementl(rcx);
1918 1918 __ jcc(Assembler::greaterEqual, loop);
1919 1919 // default case
1920 1920 __ profile_switch_default(rax);
1921 1921 __ movl(rdx, Address(rbx, 0));
1922 1922 __ jmpb(continue_execution);
1923 1923 // entry found -> get offset
1924 1924 __ bind(found);
1925 1925 __ movl(rdx, Address(rbx, rcx, Address::times_8, 3 * wordSize));
1926 1926 __ profile_switch_case(rcx, rax, rbx);
1927 1927 // continue execution
1928 1928 __ bind(continue_execution);
1929 1929 __ bswapl(rdx);
1930 1930 __ load_unsigned_byte(rbx, Address(rsi, rdx, Address::times_1));
1931 1931 __ addptr(rsi, rdx);
1932 1932 __ dispatch_only(vtos);
1933 1933 }
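// For reference (JVM spec layout): lookupswitch is padded to a 4-byte
// boundary and laid out as big-endian 32-bit words: default, npairs, then
// npairs (match, offset) pairs; the times_8 scaling above indexes those
// pairs, with matches at 2*wordSize and offsets at 3*wordSize past the
// aligned base.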
1934 1934
1935 1935
1936 1936 void TemplateTable::fast_binaryswitch() {
1937 1937 transition(itos, vtos);
1938 1938 // Implementation using the following core algorithm:
1939 1939 //
1940 1940 // int binary_search(int key, LookupswitchPair* array, int n) {
1941 1941 // // Binary search according to "Methodik des Programmierens" by
1942 1942 // // Edsger W. Dijkstra and W.H.J. Feijen, Addison Wesley Germany 1985.
1943 1943 // int i = 0;
1944 1944 // int j = n;
1945 1945 // while (i+1 < j) {
1946 1946 // // invariant P: 0 <= i < j <= n and (a[i] <= key < a[j] or Q)
1947 1947 // // with Q: for all i: 0 <= i < n: key < a[i]
1948 1948   //     // where a stands for the array and assuming that the (nonexistent)
1949 1949 // // element a[n] is infinitely big.
1950 1950 // int h = (i + j) >> 1;
1951 1951 // // i < h < j
1952 1952 // if (key < array[h].fast_match()) {
1953 1953 // j = h;
1954 1954 // } else {
1955 1955 // i = h;
1956 1956 // }
1957 1957 // }
1958 1958 // // R: a[i] <= key < a[i+1] or Q
1959 1959 // // (i.e., if key is within array, i is the correct index)
1960 1960 // return i;
1961 1961 // }
1962 1962
1963 1963 // register allocation
1964 1964 const Register key = rax; // already set (tosca)
1965 1965 const Register array = rbx;
1966 1966 const Register i = rcx;
1967 1967 const Register j = rdx;
1968 1968 const Register h = rdi; // needs to be restored
1969 1969 const Register temp = rsi;
1970 1970 // setup array
1971 1971 __ save_bcp();
1972 1972
1973 1973 __ lea(array, at_bcp(3*wordSize)); // btw: should be able to get rid of this instruction (change offsets below)
1974 1974 __ andptr(array, -wordSize);
1975 1975 // initialize i & j
1976 1976 __ xorl(i, i); // i = 0;
1977 1977 __ movl(j, Address(array, -wordSize)); // j = length(array);
1978 1978 // Convert j into native byteordering
1979 1979 __ bswapl(j);
1980 1980 // and start
1981 1981 Label entry;
1982 1982 __ jmp(entry);
1983 1983
1984 1984 // binary search loop
1985 1985 { Label loop;
1986 1986 __ bind(loop);
1987 1987 // int h = (i + j) >> 1;
1988 1988 __ leal(h, Address(i, j, Address::times_1)); // h = i + j;
1989 1989 __ sarl(h, 1); // h = (i + j) >> 1;
1990 1990 // if (key < array[h].fast_match()) {
1991 1991 // j = h;
1992 1992 // } else {
1993 1993 // i = h;
1994 1994 // }
1995 1995 // Convert array[h].match to native byte-ordering before compare
1996 1996 __ movl(temp, Address(array, h, Address::times_8, 0*wordSize));
1997 1997 __ bswapl(temp);
1998 1998 __ cmpl(key, temp);
1999 1999 if (VM_Version::supports_cmov()) {
2000 2000 __ cmovl(Assembler::less , j, h); // j = h if (key < array[h].fast_match())
2001 2001 __ cmovl(Assembler::greaterEqual, i, h); // i = h if (key >= array[h].fast_match())
2002 2002 } else {
2003 2003 Label set_i, end_of_if;
2004 2004 __ jccb(Assembler::greaterEqual, set_i); // {
2005 2005 __ mov(j, h); // j = h;
2006 2006 __ jmp(end_of_if); // }
2007 2007 __ bind(set_i); // else {
2008 2008 __ mov(i, h); // i = h;
2009 2009 __ bind(end_of_if); // }
2010 2010 }
2011 2011 // while (i+1 < j)
2012 2012 __ bind(entry);
2013 2013 __ leal(h, Address(i, 1)); // i+1
2014 2014 __ cmpl(h, j); // i+1 < j
2015 2015 __ jcc(Assembler::less, loop);
2016 2016 }
2017 2017
2018 2018 // end of binary search, result index is i (must check again!)
2019 2019 Label default_case;
2020 2020 // Convert array[i].match to native byte-ordering before compare
2021 2021 __ movl(temp, Address(array, i, Address::times_8, 0*wordSize));
2022 2022 __ bswapl(temp);
2023 2023 __ cmpl(key, temp);
2024 2024 __ jcc(Assembler::notEqual, default_case);
2025 2025
2026 2026 // entry found -> j = offset
2027 2027 __ movl(j , Address(array, i, Address::times_8, 1*wordSize));
2028 2028 __ profile_switch_case(i, key, array);
2029 2029 __ bswapl(j);
2030 2030 LP64_ONLY(__ movslq(j, j));
2031 2031 __ restore_bcp();
2032 2032 __ restore_locals(); // restore rdi
2033 2033 __ load_unsigned_byte(rbx, Address(rsi, j, Address::times_1));
2034 2034
2035 2035 __ addptr(rsi, j);
2036 2036 __ dispatch_only(vtos);
2037 2037
2038 2038 // default case -> j = default offset
2039 2039 __ bind(default_case);
2040 2040 __ profile_switch_default(i);
2041 2041 __ movl(j, Address(array, -2*wordSize));
2042 2042 __ bswapl(j);
2043 2043 LP64_ONLY(__ movslq(j, j));
2044 2044 __ restore_bcp();
2045 2045 __ restore_locals(); // restore rdi
2046 2046 __ load_unsigned_byte(rbx, Address(rsi, j, Address::times_1));
2047 2047 __ addptr(rsi, j);
2048 2048 __ dispatch_only(vtos);
2049 2049 }
2050 2050
2051 2051
2052 2052 void TemplateTable::_return(TosState state) {
2053 2053 transition(state, state);
2054 2054 assert(_desc->calls_vm(), "inconsistent calls_vm information"); // call in remove_activation
2055 2055
2056 2056 if (_desc->bytecode() == Bytecodes::_return_register_finalizer) {
2057 2057 assert(state == vtos, "only valid state");
2058 2058 __ movptr(rax, aaddress(0));
2059 2059 __ movptr(rdi, Address(rax, oopDesc::klass_offset_in_bytes()));
2060 2060 __ movl(rdi, Address(rdi, Klass::access_flags_offset_in_bytes() + sizeof(oopDesc)));
2061 2061 __ testl(rdi, JVM_ACC_HAS_FINALIZER);
2062 2062 Label skip_register_finalizer;
2063 2063 __ jcc(Assembler::zero, skip_register_finalizer);
2064 2064
2065 2065 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::register_finalizer), rax);
2066 2066
2067 2067 __ bind(skip_register_finalizer);
2068 2068 }
2069 2069
2070 2070 __ remove_activation(state, rsi);
2071 2071 __ jmp(rsi);
2072 2072 }
2073 2073
2074 2074
2075 2075 // ----------------------------------------------------------------------------
2076 2076 // Volatile variables demand their effects be made known to all CPUs in
2077 2077 // order. Store buffers on most chips allow reads & writes to reorder; the
2078 2078 // JMM's ReadAfterWrite.java test fails in -Xint mode without some kind of
2079 2079 // memory barrier (i.e., it's not sufficient that the interpreter does not
2080 2080 // reorder volatile references, the hardware also must not reorder them).
2081 2081 //
2082 2082 // According to the new Java Memory Model (JMM):
2083 2083 // (1) All volatiles are serialized with respect to each other.
2084 2084 // ALSO reads & writes act as acquire & release, so:
2085 2085 // (2) A read cannot let unrelated NON-volatile memory refs that happen after
2086 2086 // the read float up to before the read. It's OK for non-volatile memory refs
2087 2087 // that happen before the volatile read to float down below it.
2088 2088 // (3) Similarly, a volatile write cannot let unrelated NON-volatile memory refs
2089 2089 // that happen BEFORE the write float down to after the write. It's OK for
2090 2090 // non-volatile memory refs that happen after the volatile write to float up
2091 2091 // before it.
2092 2092 //
2093 2093 // We only put in barriers around volatile refs (they are expensive), not
2094 2094 // _between_ memory refs (that would require us to track the flavor of the
2095 2095 // previous memory refs). Requirements (2) and (3) require some barriers
2096 2096 // before volatile stores and after volatile loads. These nearly cover
2097 2097 // requirement (1) but miss the volatile-store-volatile-load case. This final
2098 2098 // case is placed after volatile-stores although it could just as well go
2099 2099 // before volatile-loads.
2100 2100 void TemplateTable::volatile_barrier(Assembler::Membar_mask_bits order_constraint ) {
2101 2101   // Helper function to insert an is-volatile test and memory barrier
2102 2102 if( !os::is_MP() ) return; // Not needed on single CPU
2103 2103 __ membar(order_constraint);
2104 2104 }
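// Worked example of the volatile-store-volatile-load case above (the classic
// store-buffering litmus test; illustrative only, vx/vy volatile, both 0):
//   Thread 1: vx = 1; r1 = vy;        Thread 2: vy = 1; r2 = vx;
// The JMM forbids r1 == 0 && r2 == 0, but without a StoreLoad barrier
// between each volatile store and the following volatile load, store
// buffers could let both threads read the stale 0.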
2105 2105
2106 2106 void TemplateTable::resolve_cache_and_index(int byte_no, Register Rcache, Register index) {
2107 2107 assert(byte_no == 1 || byte_no == 2, "byte_no out of range");
2108 2108 bool is_invokedynamic = (bytecode() == Bytecodes::_invokedynamic);
2109 2109
2110 2110 Register temp = rbx;
2111 2111
2112 2112 assert_different_registers(Rcache, index, temp);
2113 2113
2114 2114 const int shift_count = (1 + byte_no)*BitsPerByte;
2115 2115 Label resolved;
2116 2116 __ get_cache_and_index_at_bcp(Rcache, index, 1, is_invokedynamic);
2117 2117 if (is_invokedynamic) {
2118 2118 // we are resolved if the f1 field contains a non-null CallSite object
2119 2119 __ cmpptr(Address(Rcache, index, Address::times_ptr, constantPoolCacheOopDesc::base_offset() + ConstantPoolCacheEntry::f1_offset()), (int32_t) NULL_WORD);
2120 2120 __ jcc(Assembler::notEqual, resolved);
2121 2121 } else {
2122 2122 __ movl(temp, Address(Rcache, index, Address::times_4, constantPoolCacheOopDesc::base_offset() + ConstantPoolCacheEntry::indices_offset()));
2123 2123 __ shrl(temp, shift_count);
2124 2124 // have we resolved this bytecode?
2125 2125 __ andl(temp, 0xFF);
2126 2126 __ cmpl(temp, (int)bytecode());
2127 2127 __ jcc(Assembler::equal, resolved);
2128 2128 }
2129 2129
2130 2130 // resolve first time through
2131 2131 address entry;
2132 2132 switch (bytecode()) {
2133 2133 case Bytecodes::_getstatic : // fall through
2134 2134 case Bytecodes::_putstatic : // fall through
2135 2135 case Bytecodes::_getfield : // fall through
2136 2136 case Bytecodes::_putfield : entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_get_put); break;
2137 2137 case Bytecodes::_invokevirtual : // fall through
2138 2138 case Bytecodes::_invokespecial : // fall through
2139 2139 case Bytecodes::_invokestatic : // fall through
2140 2140 case Bytecodes::_invokeinterface: entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_invoke); break;
2141 2141 case Bytecodes::_invokedynamic : entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_invokedynamic); break;
2142 2142 default : ShouldNotReachHere(); break;
2143 2143 }
2144 2144 __ movl(temp, (int)bytecode());
2145 2145 __ call_VM(noreg, entry, temp);
2146 2146 // Update registers with resolved info
2147 2147 __ get_cache_and_index_at_bcp(Rcache, index, 1, is_invokedynamic);
2148 2148 __ bind(resolved);
2149 2149 }
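// Roughly, the fast path above implements (illustrative pseudo-code with
// abbreviated field names):
//   if (is_invokedynamic ? (entry->f1 != NULL)
//                        : (((entry->indices >> shift_count) & 0xFF) == bytecode()))
//     goto resolved;                             // already resolved
//   InterpreterRuntime::resolve_*(bytecode());   // slow path fills in the entry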
2150 2150
2151 2151
2152 2152 // The cache and index registers must be set before the call
2153 2153 void TemplateTable::load_field_cp_cache_entry(Register obj,
2154 2154 Register cache,
2155 2155 Register index,
2156 2156 Register off,
2157 2157 Register flags,
2158 2158 bool is_static = false) {
2159 2159 assert_different_registers(cache, index, flags, off);
2160 2160
2161 2161 ByteSize cp_base_offset = constantPoolCacheOopDesc::base_offset();
2162 2162 // Field offset
2163 2163 __ movptr(off, Address(cache, index, Address::times_ptr,
2164 2164 in_bytes(cp_base_offset + ConstantPoolCacheEntry::f2_offset())));
2165 2165 // Flags
2166 2166 __ movl(flags, Address(cache, index, Address::times_ptr,
2167 2167 in_bytes(cp_base_offset + ConstantPoolCacheEntry::flags_offset())));
2168 2168
2169 2169 // klass overwrite register
2170 2170 if (is_static) {
2171 2171 __ movptr(obj, Address(cache, index, Address::times_ptr,
2172 2172 in_bytes(cp_base_offset + ConstantPoolCacheEntry::f1_offset())));
2173 2173 }
2174 2174 }
2175 2175
2176 2176 void TemplateTable::load_invoke_cp_cache_entry(int byte_no,
2177 2177 Register method,
2178 2178 Register itable_index,
2179 2179 Register flags,
2180 2180 bool is_invokevirtual,
2181 2181 bool is_invokevfinal /*unused*/) {
2182 2182 // setup registers
2183 2183 const Register cache = rcx;
2184 2184 const Register index = rdx;
2185 2185 assert_different_registers(method, flags);
2186 2186 assert_different_registers(method, cache, index);
2187 2187 assert_different_registers(itable_index, flags);
2188 2188 assert_different_registers(itable_index, cache, index);
2189 2189 // determine constant pool cache field offsets
2190 2190 const int method_offset = in_bytes(
2191 2191 constantPoolCacheOopDesc::base_offset() +
2192 2192 (is_invokevirtual
2193 2193 ? ConstantPoolCacheEntry::f2_offset()
2194 2194 : ConstantPoolCacheEntry::f1_offset()
2195 2195 )
2196 2196 );
2197 2197 const int flags_offset = in_bytes(constantPoolCacheOopDesc::base_offset() +
2198 2198 ConstantPoolCacheEntry::flags_offset());
2199 2199 // access constant pool cache fields
2200 2200 const int index_offset = in_bytes(constantPoolCacheOopDesc::base_offset() +
2201 2201 ConstantPoolCacheEntry::f2_offset());
2202 2202
2203 2203 resolve_cache_and_index(byte_no, cache, index);
2204 2204
2205 2205 __ movptr(method, Address(cache, index, Address::times_ptr, method_offset));
2206 2206 if (itable_index != noreg) {
2207 2207 __ movptr(itable_index, Address(cache, index, Address::times_ptr, index_offset));
2208 2208 }
2209 2209 __ movl(flags , Address(cache, index, Address::times_ptr, flags_offset ));
2210 2210 }
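// Illustrative summary of the selection above (pseudo-code, abbreviated
// field names):
//   method       = is_invokevirtual ? entry->f2 : entry->f1;  // methodOop, or vtable index for invokevirtual
//   itable_index = entry->f2;                                 // meaningful for invokeinterface
//   flags        = entry->flags;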
2211 2211
2212 2212
2213 2213 // The cache and index registers are expected to be set before the call.
2214 2214 // Correct values of the cache and index registers are preserved.
2215 2215 void TemplateTable::jvmti_post_field_access(Register cache,
2216 2216 Register index,
2217 2217 bool is_static,
2218 2218 bool has_tos) {
2219 2219 if (JvmtiExport::can_post_field_access()) {
2220 2220 // Check to see if a field access watch has been set before we take
2221 2221 // the time to call into the VM.
2222 2222 Label L1;
2223 2223 assert_different_registers(cache, index, rax);
2224 2224 __ mov32(rax, ExternalAddress((address) JvmtiExport::get_field_access_count_addr()));
2225 2225 __ testl(rax,rax);
2226 2226 __ jcc(Assembler::zero, L1);
2227 2227
2228 2228 // cache entry pointer
2229 2229 __ addptr(cache, in_bytes(constantPoolCacheOopDesc::base_offset()));
2230 2230 __ shll(index, LogBytesPerWord);
2231 2231 __ addptr(cache, index);
2232 2232 if (is_static) {
2233 2233 __ xorptr(rax, rax); // NULL object reference
2234 2234 } else {
2235 2235 __ pop(atos); // Get the object
2236 2236 __ verify_oop(rax);
2237 2237 __ push(atos); // Restore stack state
2238 2238 }
2239 2239 // rax,: object pointer or NULL
2240 2240 // cache: cache entry pointer
2241 2241 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_field_access),
2242 2242 rax, cache);
2243 2243 __ get_cache_and_index_at_bcp(cache, index, 1);
2244 2244 __ bind(L1);
2245 2245 }
2246 2246 }
2247 2247
2248 2248 void TemplateTable::pop_and_check_object(Register r) {
2249 2249 __ pop_ptr(r);
2250 2250 __ null_check(r); // for field access must check obj.
2251 2251 __ verify_oop(r);
2252 2252 }
2253 2253
2254 2254 void TemplateTable::getfield_or_static(int byte_no, bool is_static) {
2255 2255 transition(vtos, vtos);
2256 2256
2257 2257 const Register cache = rcx;
2258 2258 const Register index = rdx;
2259 2259 const Register obj = rcx;
2260 2260 const Register off = rbx;
2261 2261 const Register flags = rax;
2262 2262
2263 2263 resolve_cache_and_index(byte_no, cache, index);
2264 2264 jvmti_post_field_access(cache, index, is_static, false);
2265 2265 load_field_cp_cache_entry(obj, cache, index, off, flags, is_static);
2266 2266
2267 2267 if (!is_static) pop_and_check_object(obj);
2268 2268
2269 2269 const Address lo(obj, off, Address::times_1, 0*wordSize);
2270 2270 const Address hi(obj, off, Address::times_1, 1*wordSize);
2271 2271
2272 2272 Label Done, notByte, notInt, notShort, notChar, notLong, notFloat, notObj, notDouble;
2273 2273
2274 2274 __ shrl(flags, ConstantPoolCacheEntry::tosBits);
2275 2275 assert(btos == 0, "change code, btos != 0");
2276 2276 // btos
2277 2277 __ andptr(flags, 0x0f);
2278 2278 __ jcc(Assembler::notZero, notByte);
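// Illustrative: after the shift, flags holds the field's TosState,
//   tos = (cache_flags >> tosBits) & 0x0f;   // btos == 0, so it is tested first
// and the compare chain below dispatches on the remaining itos/atos/ctos/
// stos/ltos/ftos/dtos cases.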
2279 2279
2280 2280 __ load_signed_byte(rax, lo );
2281 2281 __ push(btos);
2282 2282 // Rewrite bytecode to be faster
2283 2283 if (!is_static) {
2284 2284 patch_bytecode(Bytecodes::_fast_bgetfield, rcx, rbx);
2285 2285 }
2286 2286 __ jmp(Done);
2287 2287
2288 2288 __ bind(notByte);
2289 2289 // itos
2290 2290 __ cmpl(flags, itos );
2291 2291 __ jcc(Assembler::notEqual, notInt);
2292 2292
2293 2293 __ movl(rax, lo );
2294 2294 __ push(itos);
2295 2295 // Rewrite bytecode to be faster
2296 2296 if (!is_static) {
2297 2297 patch_bytecode(Bytecodes::_fast_igetfield, rcx, rbx);
2298 2298 }
2299 2299 __ jmp(Done);
2300 2300
2301 2301 __ bind(notInt);
2302 2302 // atos
2303 2303 __ cmpl(flags, atos );
2304 2304 __ jcc(Assembler::notEqual, notObj);
2305 2305
2306 2306 __ movl(rax, lo );
2307 2307 __ push(atos);
2308 2308 if (!is_static) {
2309 2309 patch_bytecode(Bytecodes::_fast_agetfield, rcx, rbx);
2310 2310 }
2311 2311 __ jmp(Done);
2312 2312
2313 2313 __ bind(notObj);
2314 2314 // ctos
2315 2315 __ cmpl(flags, ctos );
2316 2316 __ jcc(Assembler::notEqual, notChar);
2317 2317
2318 2318 __ load_unsigned_short(rax, lo );
2319 2319 __ push(ctos);
2320 2320 if (!is_static) {
2321 2321 patch_bytecode(Bytecodes::_fast_cgetfield, rcx, rbx);
2322 2322 }
2323 2323 __ jmp(Done);
2324 2324
2325 2325 __ bind(notChar);
2326 2326 // stos
2327 2327 __ cmpl(flags, stos );
2328 2328 __ jcc(Assembler::notEqual, notShort);
2329 2329
2330 2330 __ load_signed_short(rax, lo );
2331 2331 __ push(stos);
2332 2332 if (!is_static) {
2333 2333 patch_bytecode(Bytecodes::_fast_sgetfield, rcx, rbx);
2334 2334 }
2335 2335 __ jmp(Done);
2336 2336
2337 2337 __ bind(notShort);
2338 2338 // ltos
2339 2339 __ cmpl(flags, ltos );
2340 2340 __ jcc(Assembler::notEqual, notLong);
2341 2341
2342 2342 // Generate code as if volatile. There just aren't enough registers to
2343 2343 // save that information and this code is faster than the test.
2344 2344 __ fild_d(lo); // Must load atomically
2345 2345 __ subptr(rsp,2*wordSize); // Make space for store
2346 2346 __ fistp_d(Address(rsp,0));
2347 2347 __ pop(rax);
2348 2348 __ pop(rdx);
2349 2349
2350 2350 __ push(ltos);
2351 2351 // Don't rewrite to _fast_lgetfield for potential volatile case.
2352 2352 __ jmp(Done);
2353 2353
2354 2354 __ bind(notLong);
2355 2355 // ftos
2356 2356 __ cmpl(flags, ftos );
2357 2357 __ jcc(Assembler::notEqual, notFloat);
2358 2358
2359 2359 __ fld_s(lo);
2360 2360 __ push(ftos);
2361 2361 if (!is_static) {
2362 2362 patch_bytecode(Bytecodes::_fast_fgetfield, rcx, rbx);
2363 2363 }
2364 2364 __ jmp(Done);
2365 2365
2366 2366 __ bind(notFloat);
2367 2367 // dtos
2368 2368 __ cmpl(flags, dtos );
2369 2369 __ jcc(Assembler::notEqual, notDouble);
2370 2370
2371 2371 __ fld_d(lo);
2372 2372 __ push(dtos);
2373 2373 if (!is_static) {
2374 2374 patch_bytecode(Bytecodes::_fast_dgetfield, rcx, rbx);
2375 2375 }
2376 2376 __ jmpb(Done);
2377 2377
2378 2378 __ bind(notDouble);
2379 2379
2380 2380 __ stop("Bad state");
2381 2381
2382 2382 __ bind(Done);
2383 2383 // Doug Lea believes this is not needed with current Sparcs (TSO) and Intel (PSO).
2384 2384 // volatile_barrier( );
2385 2385 }
2386 2386
2387 2387
2388 2388 void TemplateTable::getfield(int byte_no) {
2389 2389 getfield_or_static(byte_no, false);
2390 2390 }
2391 2391
2392 2392
2393 2393 void TemplateTable::getstatic(int byte_no) {
2394 2394 getfield_or_static(byte_no, true);
2395 2395 }
2396 2396
2397 2397 // The cache and index registers are expected to be set before the call.
2398 2398 // The function may destroy various registers, just not the cache and index registers.
2399 2399 void TemplateTable::jvmti_post_field_mod(Register cache, Register index, bool is_static) {
2400 2400
2401 2401 ByteSize cp_base_offset = constantPoolCacheOopDesc::base_offset();
2402 2402
2403 2403 if (JvmtiExport::can_post_field_modification()) {
2404 2404 // Check to see if a field modification watch has been set before we take
2405 2405 // the time to call into the VM.
2406 2406 Label L1;
2407 2407 assert_different_registers(cache, index, rax);
2408 2408 __ mov32(rax, ExternalAddress((address)JvmtiExport::get_field_modification_count_addr()));
2409 2409 __ testl(rax, rax);
2410 2410 __ jcc(Assembler::zero, L1);
2411 2411
2412 2412     // The cache and index registers have already been set.
2413 2413     // That would allow this call to be eliminated, but the cache and index
2414 2414     // registers would then have to be used consistently after this line.
2415 2415 __ get_cache_and_index_at_bcp(rax, rdx, 1);
2416 2416
2417 2417 if (is_static) {
2418 2418 // Life is simple. Null out the object pointer.
2419 2419 __ xorptr(rbx, rbx);
2420 2420 } else {
2421 2421 // Life is harder. The stack holds the value on top, followed by the object.
2422 2422 // We don't know the size of the value, though; it could be one or two words
2423 2423 // depending on its type. As a result, we must find the type to determine where
2424 2424 // the object is.
2425 2425 Label two_word, valsize_known;
2426 2426 __ movl(rcx, Address(rax, rdx, Address::times_ptr, in_bytes(cp_base_offset +
2427 2427 ConstantPoolCacheEntry::flags_offset())));
2428 2428 __ mov(rbx, rsp);
2429 2429 __ shrl(rcx, ConstantPoolCacheEntry::tosBits);
2430 2430 // Make sure we don't need to mask rcx for tosBits after the above shift
2431 2431 ConstantPoolCacheEntry::verify_tosBits();
2432 2432 __ cmpl(rcx, ltos);
2433 2433 __ jccb(Assembler::equal, two_word);
2434 2434 __ cmpl(rcx, dtos);
2435 2435 __ jccb(Assembler::equal, two_word);
2436 2436 __ addptr(rbx, Interpreter::expr_offset_in_bytes(1)); // one word jvalue (not ltos, dtos)
2437 2437 __ jmpb(valsize_known);
2438 2438
2439 2439 __ bind(two_word);
2440 2440 __ addptr(rbx, Interpreter::expr_offset_in_bytes(2)); // two words jvalue
2441 2441
2442 2442 __ bind(valsize_known);
2443 2443 // setup object pointer
2444 2444 __ movptr(rbx, Address(rbx, 0));
2445 2445 }
2446 2446 // cache entry pointer
2447 2447 __ addptr(rax, in_bytes(cp_base_offset));
2448 2448 __ shll(rdx, LogBytesPerWord);
2449 2449 __ addptr(rax, rdx);
2450 2450 // object (tos)
2451 2451 __ mov(rcx, rsp);
2452 2452 // rbx,: object pointer set up above (NULL if static)
2453 2453 // rax,: cache entry pointer
2454 2454 // rcx: jvalue object on the stack
2455 2455 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_field_modification),
2456 2456 rbx, rax, rcx);
2457 2457 __ get_cache_and_index_at_bcp(cache, index, 1);
2458 2458 __ bind(L1);
2459 2459 }
2460 2460 }
2461 2461
2462 2462
2463 2463 void TemplateTable::putfield_or_static(int byte_no, bool is_static) {
2464 2464 transition(vtos, vtos);
2465 2465
2466 2466 const Register cache = rcx;
2467 2467 const Register index = rdx;
2468 2468 const Register obj = rcx;
2469 2469 const Register off = rbx;
2470 2470 const Register flags = rax;
2471 2471
2472 2472 resolve_cache_and_index(byte_no, cache, index);
2473 2473 jvmti_post_field_mod(cache, index, is_static);
2474 2474 load_field_cp_cache_entry(obj, cache, index, off, flags, is_static);
2475 2475
2476 2476 // Doug Lea believes this is not needed with current Sparcs (TSO) and Intel (PSO).
2477 2477 // volatile_barrier( );
2478 2478
2479 2479 Label notVolatile, Done;
2480 2480 __ movl(rdx, flags);
2481 2481 __ shrl(rdx, ConstantPoolCacheEntry::volatileField);
2482 2482 __ andl(rdx, 0x1);
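// i.e. (illustrative): rdx = (cache_flags >> volatileField) & 1, so rdx is
// nonzero exactly when the field is volatile; it is tested at the ltos case
// below and again at Done.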
2483 2483
2484 2484 // field addresses
2485 2485 const Address lo(obj, off, Address::times_1, 0*wordSize);
2486 2486 const Address hi(obj, off, Address::times_1, 1*wordSize);
2487 2487
2488 2488 Label notByte, notInt, notShort, notChar, notLong, notFloat, notObj, notDouble;
2489 2489
2490 2490 __ shrl(flags, ConstantPoolCacheEntry::tosBits);
2491 2491 assert(btos == 0, "change code, btos != 0");
2492 2492 // btos
2493 2493 __ andl(flags, 0x0f);
2494 2494 __ jcc(Assembler::notZero, notByte);
2495 2495
2496 2496 __ pop(btos);
2497 2497 if (!is_static) pop_and_check_object(obj);
2498 2498 __ movb(lo, rax );
2499 2499 if (!is_static) {
2500 2500 patch_bytecode(Bytecodes::_fast_bputfield, rcx, rbx);
2501 2501 }
2502 2502 __ jmp(Done);
2503 2503
2504 2504 __ bind(notByte);
2505 2505 // itos
2506 2506 __ cmpl(flags, itos );
2507 2507 __ jcc(Assembler::notEqual, notInt);
2508 2508
2509 2509 __ pop(itos);
2510 2510 if (!is_static) pop_and_check_object(obj);
2511 2511
2512 2512 __ movl(lo, rax );
2513 2513 if (!is_static) {
2514 2514 patch_bytecode(Bytecodes::_fast_iputfield, rcx, rbx);
2515 2515 }
2516 2516 __ jmp(Done);
2517 2517
2518 2518 __ bind(notInt);
2519 2519 // atos
2520 2520 __ cmpl(flags, atos );
2521 2521 __ jcc(Assembler::notEqual, notObj);
2522 2522
2523 2523 __ pop(atos);
2524 2524 if (!is_static) pop_and_check_object(obj);
2525 2525
2526 2526 do_oop_store(_masm, lo, rax, _bs->kind(), false);
2527 2527
2528 2528 if (!is_static) {
2529 2529 patch_bytecode(Bytecodes::_fast_aputfield, rcx, rbx);
2530 2530 }
2531 2531
2532 2532 __ jmp(Done);
2533 2533
2534 2534 __ bind(notObj);
2535 2535 // ctos
2536 2536 __ cmpl(flags, ctos );
2537 2537 __ jcc(Assembler::notEqual, notChar);
2538 2538
2539 2539 __ pop(ctos);
2540 2540 if (!is_static) pop_and_check_object(obj);
2541 2541 __ movw(lo, rax );
2542 2542 if (!is_static) {
2543 2543 patch_bytecode(Bytecodes::_fast_cputfield, rcx, rbx);
2544 2544 }
2545 2545 __ jmp(Done);
2546 2546
2547 2547 __ bind(notChar);
2548 2548 // stos
2549 2549 __ cmpl(flags, stos );
2550 2550 __ jcc(Assembler::notEqual, notShort);
2551 2551
2552 2552 __ pop(stos);
2553 2553 if (!is_static) pop_and_check_object(obj);
2554 2554 __ movw(lo, rax );
2555 2555 if (!is_static) {
2556 2556 patch_bytecode(Bytecodes::_fast_sputfield, rcx, rbx);
2557 2557 }
2558 2558 __ jmp(Done);
2559 2559
2560 2560 __ bind(notShort);
2561 2561 // ltos
2562 2562 __ cmpl(flags, ltos );
2563 2563 __ jcc(Assembler::notEqual, notLong);
2564 2564
2565 2565 Label notVolatileLong;
2566 2566 __ testl(rdx, rdx);
2567 2567 __ jcc(Assembler::zero, notVolatileLong);
2568 2568
2569 2569 __ pop(ltos); // overwrites rdx, do this after testing volatile.
2570 2570 if (!is_static) pop_and_check_object(obj);
2571 2571
2572 2572 // Replace with real volatile test
2573 2573 __ push(rdx);
2574 2574 __ push(rax); // Must update atomically with FIST
2575 2575 __ fild_d(Address(rsp,0)); // So load into FPU register
2576 2576 __ fistp_d(lo); // and put into memory atomically
2577 2577 __ addptr(rsp, 2*wordSize);
2578 2578 // volatile_barrier();
2579 2579 volatile_barrier(Assembler::Membar_mask_bits(Assembler::StoreLoad |
2580 2580 Assembler::StoreStore));
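// (The fild/fistp pair above is the classic way to get an atomic 64-bit
// store on 32-bit x86 without relying on SSE2; the ltos case of getfield
// uses the same trick for the atomic load.)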
2581 2581 // Don't rewrite volatile version
2582 2582 __ jmp(notVolatile);
2583 2583
2584 2584 __ bind(notVolatileLong);
2585 2585
2586 2586 __ pop(ltos); // overwrites rdx
2587 2587 if (!is_static) pop_and_check_object(obj);
2588 2588 NOT_LP64(__ movptr(hi, rdx));
2589 2589 __ movptr(lo, rax);
2590 2590 if (!is_static) {
2591 2591 patch_bytecode(Bytecodes::_fast_lputfield, rcx, rbx);
2592 2592 }
2593 2593 __ jmp(notVolatile);
2594 2594
2595 2595 __ bind(notLong);
2596 2596 // ftos
2597 2597 __ cmpl(flags, ftos );
2598 2598 __ jcc(Assembler::notEqual, notFloat);
2599 2599
2600 2600 __ pop(ftos);
2601 2601 if (!is_static) pop_and_check_object(obj);
2602 2602 __ fstp_s(lo);
2603 2603 if (!is_static) {
2604 2604 patch_bytecode(Bytecodes::_fast_fputfield, rcx, rbx);
2605 2605 }
2606 2606 __ jmp(Done);
2607 2607
2608 2608 __ bind(notFloat);
2609 2609 // dtos
2610 2610 __ cmpl(flags, dtos );
2611 2611 __ jcc(Assembler::notEqual, notDouble);
2612 2612
2613 2613 __ pop(dtos);
2614 2614 if (!is_static) pop_and_check_object(obj);
2615 2615 __ fstp_d(lo);
2616 2616 if (!is_static) {
2617 2617 patch_bytecode(Bytecodes::_fast_dputfield, rcx, rbx);
2618 2618 }
2619 2619 __ jmp(Done);
2620 2620
2621 2621 __ bind(notDouble);
2622 2622
2623 2623 __ stop("Bad state");
2624 2624
2625 2625 __ bind(Done);
2626 2626
2627 2627 // Check for volatile store
2628 2628 __ testl(rdx, rdx);
2629 2629 __ jcc(Assembler::zero, notVolatile);
2630 2630 volatile_barrier(Assembler::Membar_mask_bits(Assembler::StoreLoad |
2631 2631 Assembler::StoreStore));
2632 2632 __ bind(notVolatile);
2633 2633 }
2634 2634
2635 2635
2636 2636 void TemplateTable::putfield(int byte_no) {
2637 2637 putfield_or_static(byte_no, false);
2638 2638 }
2639 2639
2640 2640
2641 2641 void TemplateTable::putstatic(int byte_no) {
2642 2642 putfield_or_static(byte_no, true);
2643 2643 }
2644 2644
2645 2645 void TemplateTable::jvmti_post_fast_field_mod() {
2646 2646 if (JvmtiExport::can_post_field_modification()) {
2647 2647 // Check to see if a field modification watch has been set before we take
2648 2648 // the time to call into the VM.
2649 2649 Label L2;
2650 2650 __ mov32(rcx, ExternalAddress((address)JvmtiExport::get_field_modification_count_addr()));
2651 2651 __ testl(rcx,rcx);
2652 2652 __ jcc(Assembler::zero, L2);
2653 2653 __ pop_ptr(rbx); // copy the object pointer from tos
2654 2654 __ verify_oop(rbx);
2655 2655 __ push_ptr(rbx); // put the object pointer back on tos
2656 2656 __ subptr(rsp, sizeof(jvalue)); // add space for a jvalue object
2657 2657 __ mov(rcx, rsp);
2658 2658 __ push_ptr(rbx); // save object pointer so we can steal rbx,
2659 2659 __ xorptr(rbx, rbx);
2660 2660 const Address lo_value(rcx, rbx, Address::times_1, 0*wordSize);
2661 2661 const Address hi_value(rcx, rbx, Address::times_1, 1*wordSize);
2662 2662 switch (bytecode()) { // load values into the jvalue object
2663 2663 case Bytecodes::_fast_bputfield: __ movb(lo_value, rax); break;
2664 2664 case Bytecodes::_fast_sputfield: __ movw(lo_value, rax); break;
2665 2665 case Bytecodes::_fast_cputfield: __ movw(lo_value, rax); break;
2666 2666 case Bytecodes::_fast_iputfield: __ movl(lo_value, rax); break;
2667 2667 case Bytecodes::_fast_lputfield:
2668 2668 NOT_LP64(__ movptr(hi_value, rdx));
2669 2669 __ movptr(lo_value, rax);
2670 2670 break;
2671 2671
2672 2672 // need to call fld_s() after fstp_s() to restore the value for below
2673 2673 case Bytecodes::_fast_fputfield: __ fstp_s(lo_value); __ fld_s(lo_value); break;
2674 2674
2675 2675 // need to call fld_d() after fstp_d() to restore the value for below
2676 2676 case Bytecodes::_fast_dputfield: __ fstp_d(lo_value); __ fld_d(lo_value); break;
2677 2677
2678 2678 // since rcx is not an object we don't call store_check() here
2679 2679 case Bytecodes::_fast_aputfield: __ movptr(lo_value, rax); break;
2680 2680
2681 2681 default: ShouldNotReachHere();
2682 2682 }
2683 2683 __ pop_ptr(rbx); // restore copy of object pointer
2684 2684
2685 2685 // Save rax, and sometimes rdx because call_VM() will clobber them,
2686 2686 // then use them for JVM/DI purposes
2687 2687 __ push(rax);
2688 2688 if (bytecode() == Bytecodes::_fast_lputfield) __ push(rdx);
2689 2689 // access constant pool cache entry
2690 2690 __ get_cache_entry_pointer_at_bcp(rax, rdx, 1);
2691 2691 __ verify_oop(rbx);
2692 2692 // rbx,: object pointer copied above
2693 2693 // rax,: cache entry pointer
2694 2694 // rcx: jvalue object on the stack
2695 2695 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_field_modification), rbx, rax, rcx);
2696 2696 if (bytecode() == Bytecodes::_fast_lputfield) __ pop(rdx); // restore high value
2697 2697 __ pop(rax); // restore lower value
2698 2698 __ addptr(rsp, sizeof(jvalue)); // release jvalue object space
2699 2699 __ bind(L2);
2700 2700 }
2701 2701 }
2702 2702
2703 2703 void TemplateTable::fast_storefield(TosState state) {
2704 2704 transition(state, vtos);
2705 2705
2706 2706 ByteSize base = constantPoolCacheOopDesc::base_offset();
2707 2707
2708 2708 jvmti_post_fast_field_mod();
2709 2709
2710 2710 // access constant pool cache
2711 2711 __ get_cache_and_index_at_bcp(rcx, rbx, 1);
2712 2712
2713 2713 // test for volatile with rdx but rdx is tos register for lputfield.
2714 2714 if (bytecode() == Bytecodes::_fast_lputfield) __ push(rdx);
2715 2715 __ movl(rdx, Address(rcx, rbx, Address::times_ptr, in_bytes(base +
2716 2716 ConstantPoolCacheEntry::flags_offset())));
2717 2717
2718 2718 // replace index with field offset from cache entry
2719 2719 __ movptr(rbx, Address(rcx, rbx, Address::times_ptr, in_bytes(base + ConstantPoolCacheEntry::f2_offset())));
2720 2720
2721 2721 // Doug Lea believes this is not needed with current Sparcs (TSO) and Intel (PSO).
2722 2722 // volatile_barrier( );
2723 2723
2724 2724 Label notVolatile, Done;
2725 2725 __ shrl(rdx, ConstantPoolCacheEntry::volatileField);
2726 2726 __ andl(rdx, 0x1);
2727 2727 // Check for volatile store
2728 2728 __ testl(rdx, rdx);
2729 2729 __ jcc(Assembler::zero, notVolatile);
2730 2730
2731 2731 if (bytecode() == Bytecodes::_fast_lputfield) __ pop(rdx);
2732 2732
2733 2733 // Get object from stack
2734 2734 pop_and_check_object(rcx);
2735 2735
2736 2736 // field addresses
2737 2737 const Address lo(rcx, rbx, Address::times_1, 0*wordSize);
2738 2738 const Address hi(rcx, rbx, Address::times_1, 1*wordSize);
2739 2739
2740 2740 // access field
2741 2741 switch (bytecode()) {
2742 2742 case Bytecodes::_fast_bputfield: __ movb(lo, rax); break;
2743 2743 case Bytecodes::_fast_sputfield: // fall through
2744 2744 case Bytecodes::_fast_cputfield: __ movw(lo, rax); break;
2745 2745 case Bytecodes::_fast_iputfield: __ movl(lo, rax); break;
2746 2746 case Bytecodes::_fast_lputfield:
2747 2747 NOT_LP64(__ movptr(hi, rdx));
2748 2748 __ movptr(lo, rax);
2749 2749 break;
2750 2750 case Bytecodes::_fast_fputfield: __ fstp_s(lo); break;
2751 2751 case Bytecodes::_fast_dputfield: __ fstp_d(lo); break;
2752 2752 case Bytecodes::_fast_aputfield: {
2753 2753 do_oop_store(_masm, lo, rax, _bs->kind(), false);
2754 2754 break;
2755 2755 }
2756 2756 default:
2757 2757 ShouldNotReachHere();
2758 2758 }
2759 2759
2760 2760 Label done;
2761 2761 volatile_barrier(Assembler::Membar_mask_bits(Assembler::StoreLoad |
2762 2762 Assembler::StoreStore));
2763 2763   // Barriers are so large that a short branch doesn't reach!
2764 2764 __ jmp(done);
2765 2765
2766 2766 // Same code as above, but don't need rdx to test for volatile.
2767 2767 __ bind(notVolatile);
2768 2768
2769 2769 if (bytecode() == Bytecodes::_fast_lputfield) __ pop(rdx);
2770 2770
2771 2771 // Get object from stack
2772 2772 pop_and_check_object(rcx);
2773 2773
2774 2774 // access field
2775 2775 switch (bytecode()) {
2776 2776 case Bytecodes::_fast_bputfield: __ movb(lo, rax); break;
2777 2777 case Bytecodes::_fast_sputfield: // fall through
2778 2778 case Bytecodes::_fast_cputfield: __ movw(lo, rax); break;
2779 2779 case Bytecodes::_fast_iputfield: __ movl(lo, rax); break;
2780 2780 case Bytecodes::_fast_lputfield:
2781 2781 NOT_LP64(__ movptr(hi, rdx));
2782 2782 __ movptr(lo, rax);
2783 2783 break;
2784 2784 case Bytecodes::_fast_fputfield: __ fstp_s(lo); break;
2785 2785 case Bytecodes::_fast_dputfield: __ fstp_d(lo); break;
2786 2786 case Bytecodes::_fast_aputfield: {
2787 2787 do_oop_store(_masm, lo, rax, _bs->kind(), false);
2788 2788 break;
2789 2789 }
2790 2790 default:
2791 2791 ShouldNotReachHere();
2792 2792 }
2793 2793 __ bind(done);
2794 2794 }
2795 2795
2796 2796
2797 2797 void TemplateTable::fast_accessfield(TosState state) {
2798 2798 transition(atos, state);
2799 2799
2800 2800 // do the JVMTI work here to avoid disturbing the register state below
2801 2801 if (JvmtiExport::can_post_field_access()) {
2802 2802 // Check to see if a field access watch has been set before we take
2803 2803 // the time to call into the VM.
2804 2804 Label L1;
2805 2805 __ mov32(rcx, ExternalAddress((address) JvmtiExport::get_field_access_count_addr()));
2806 2806 __ testl(rcx,rcx);
2807 2807 __ jcc(Assembler::zero, L1);
2808 2808 // access constant pool cache entry
2809 2809 __ get_cache_entry_pointer_at_bcp(rcx, rdx, 1);
2810 2810 __ push_ptr(rax); // save object pointer before call_VM() clobbers it
2811 2811 __ verify_oop(rax);
2812 2812 // rax,: object pointer copied above
2813 2813 // rcx: cache entry pointer
2814 2814 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_field_access), rax, rcx);
2815 2815 __ pop_ptr(rax); // restore object pointer
2816 2816 __ bind(L1);
2817 2817 }
2818 2818
2819 2819 // access constant pool cache
2820 2820 __ get_cache_and_index_at_bcp(rcx, rbx, 1);
2821 2821 // replace index with field offset from cache entry
2822 2822 __ movptr(rbx, Address(rcx,
2823 2823 rbx,
2824 2824 Address::times_ptr,
2825 2825 in_bytes(constantPoolCacheOopDesc::base_offset() + ConstantPoolCacheEntry::f2_offset())));
2826 2826
2827 2827
2828 2828 // rax,: object
2829 2829 __ verify_oop(rax);
2830 2830 __ null_check(rax);
2831 2831 // field addresses
2832 2832 const Address lo = Address(rax, rbx, Address::times_1, 0*wordSize);
2833 2833 const Address hi = Address(rax, rbx, Address::times_1, 1*wordSize);
2834 2834
2835 2835 // access field
2836 2836 switch (bytecode()) {
2837 2837 case Bytecodes::_fast_bgetfield: __ movsbl(rax, lo ); break;
2838 2838 case Bytecodes::_fast_sgetfield: __ load_signed_short(rax, lo ); break;
2839 2839 case Bytecodes::_fast_cgetfield: __ load_unsigned_short(rax, lo ); break;
2840 2840 case Bytecodes::_fast_igetfield: __ movl(rax, lo); break;
2841 2841 case Bytecodes::_fast_lgetfield: __ stop("should not be rewritten"); break;
2842 2842 case Bytecodes::_fast_fgetfield: __ fld_s(lo); break;
2843 2843 case Bytecodes::_fast_dgetfield: __ fld_d(lo); break;
2844 2844 case Bytecodes::_fast_agetfield: __ movptr(rax, lo); __ verify_oop(rax); break;
2845 2845 default:
2846 2846 ShouldNotReachHere();
2847 2847 }
2848 2848
2849 2849 // Doug Lea believes this is not needed with current Sparcs(TSO) and Intel(PSO)
2850 2850 // volatile_barrier( );
2851 2851 }
2852 2852
2853 2853 void TemplateTable::fast_xaccess(TosState state) {
2854 2854 transition(vtos, state);
2855 2855 // get receiver
2856 2856 __ movptr(rax, aaddress(0));
2857 2857 debug_only(__ verify_local_tag(frame::TagReference, 0));
2858 2858 // access constant pool cache
2859 2859 __ get_cache_and_index_at_bcp(rcx, rdx, 2);
2860 2860 __ movptr(rbx, Address(rcx,
2861 2861 rdx,
2862 2862 Address::times_ptr,
2863 2863 in_bytes(constantPoolCacheOopDesc::base_offset() + ConstantPoolCacheEntry::f2_offset())));
2864 2864 // make sure exception is reported in correct bcp range (getfield is next instruction)
2865 2865 __ increment(rsi);
2866 2866 __ null_check(rax);
2867 2867 const Address lo = Address(rax, rbx, Address::times_1, 0*wordSize);
2868 2868 if (state == itos) {
2869 2869 __ movl(rax, lo);
2870 2870 } else if (state == atos) {
2871 2871 __ movptr(rax, lo);
2872 2872 __ verify_oop(rax);
2873 2873 } else if (state == ftos) {
2874 2874 __ fld_s(lo);
2875 2875 } else {
2876 2876 ShouldNotReachHere();
2877 2877 }
2878 2878 __ decrement(rsi);
2879 2879 }
2880 2880
2881 2881
2882 2882
2883 2883 //----------------------------------------------------------------------------------------------------
2884 2884 // Calls
2885 2885
2886 2886 void TemplateTable::count_calls(Register method, Register temp) {
2887 2887 // implemented elsewhere
2888 2888 ShouldNotReachHere();
2889 2889 }
2890 2890
2891 2891
2892 2892 void TemplateTable::prepare_invoke(Register method, Register index, int byte_no) {
2893 2893 // determine flags
2894 2894 Bytecodes::Code code = bytecode();
2895 2895 const bool is_invokeinterface = code == Bytecodes::_invokeinterface;
2896 2896 const bool is_invokedynamic = code == Bytecodes::_invokedynamic;
2897 2897 const bool is_invokevirtual = code == Bytecodes::_invokevirtual;
2898 2898 const bool is_invokespecial = code == Bytecodes::_invokespecial;
2899 2899 const bool load_receiver = (code != Bytecodes::_invokestatic && code != Bytecodes::_invokedynamic);
2900 2900 const bool receiver_null_check = is_invokespecial;
2901 2901 const bool save_flags = is_invokeinterface || is_invokevirtual;
2902 2902 // setup registers & access constant pool cache
2903 2903 const Register recv = rcx;
2904 2904 const Register flags = rdx;
2905 2905 assert_different_registers(method, index, recv, flags);
2906 2906
2907 2907 // save 'interpreter return address'
2908 2908 __ save_bcp();
2909 2909
2910 2910 load_invoke_cp_cache_entry(byte_no, method, index, flags, is_invokevirtual);
2911 2911
2912 2912 // load receiver if needed (note: no return address pushed yet)
2913 2913 if (load_receiver) {
2914 2914 __ movl(recv, flags);
2915 2915 __ andl(recv, 0xFF);
2916 2916 // recv count is 0 based?
2917 2917 Address recv_addr(rsp, recv, Interpreter::stackElementScale(), -Interpreter::expr_offset_in_bytes(1));
2918 2918 if (is_invokedynamic) {
2919 2919 __ lea(recv, recv_addr);
2920 2920 } else {
2921 2921 __ movptr(recv, recv_addr);
2922 2922 __ verify_oop(recv);
2923 2923 }
2924 2924 }
2925 2925
2926 2926 // do null check if needed
2927 2927 if (receiver_null_check) {
2928 2928 __ null_check(recv);
2929 2929 }
2930 2930
2931 2931 if (save_flags) {
2932 2932 __ mov(rsi, flags);
2933 2933 }
2934 2934
2935 2935 // compute return type
2936 2936 __ shrl(flags, ConstantPoolCacheEntry::tosBits);
2937 2937 // Make sure we don't need to mask flags for tosBits after the above shift
2938 2938 ConstantPoolCacheEntry::verify_tosBits();
2939 2939 // load return address
2940 2940 {
2941 2941 address table_addr;
2942 2942 if (is_invokeinterface || is_invokedynamic)
2943 2943 table_addr = (address)Interpreter::return_5_addrs_by_index_table();
2944 2944 else
2945 2945 table_addr = (address)Interpreter::return_3_addrs_by_index_table();
2946 2946 ExternalAddress table(table_addr);
2947 2947 __ movptr(flags, ArrayAddress(table, Address(noreg, flags, Address::times_ptr)));
2948 2948 }
2949 2949
2950 2950 // push return address
2951 2951 __ push(flags);
2952 2952
2953 2953 // Restore flag value from the constant pool cache, and restore rsi
2954 2954 // for later null checks. rsi is the bytecode pointer
2955 2955 if (save_flags) {
2956 2956 __ mov(flags, rsi);
2957 2957 __ restore_bcp();
2958 2958 }
2959 2959 }
2960 2960
2961 2961
2962 2962 void TemplateTable::invokevirtual_helper(Register index, Register recv,
2963 2963 Register flags) {
2964 2964
2965 2965 // Uses temporary registers rax, rdx
2966 2966 assert_different_registers(index, recv, rax, rdx);
2967 2967
2968 2968 // Test for an invoke of a final method
2969 2969 Label notFinal;
2970 2970 __ movl(rax, flags);
2971 2971 __ andl(rax, (1 << ConstantPoolCacheEntry::vfinalMethod));
2972 2972 __ jcc(Assembler::zero, notFinal);
2973 2973
2974 2974 Register method = index; // method must be rbx,
2975 2975 assert(method == rbx, "methodOop must be rbx, for interpreter calling convention");
2976 2976
2977 2977 // do the call - the index is actually the method to call
2978 2978 __ verify_oop(method);
2979 2979
2980 2980 // It's final, need a null check here!
2981 2981 __ null_check(recv);
2982 2982
2983 2983 // profile this call
2984 2984 __ profile_final_call(rax);
2985 2985
2986 2986 __ jump_from_interpreted(method, rax);
2987 2987
2988 2988 __ bind(notFinal);
2989 2989
2990 2990 // get receiver klass
2991 2991 __ null_check(recv, oopDesc::klass_offset_in_bytes());
2992 2992   // Keep recv in rcx; the callee expects it there
2993 2993 __ movptr(rax, Address(recv, oopDesc::klass_offset_in_bytes()));
2994 2994 __ verify_oop(rax);
2995 2995
2996 2996 // profile this call
2997 2997 __ profile_virtual_call(rax, rdi, rdx);
2998 2998
2999 2999 // get target methodOop & entry point
3000 3000 const int base = instanceKlass::vtable_start_offset() * wordSize;
3001 3001 assert(vtableEntry::size() * wordSize == 4, "adjust the scaling in the code below");
3002 3002 __ movptr(method, Address(rax, index, Address::times_ptr, base + vtableEntry::method_offset_in_bytes()));
3003 3003 __ jump_from_interpreted(method, rdx);
3004 3004 }
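// Illustrative analogue of the vtable dispatch above (pseudo-code, not the
// VM's actual API):
//   method = recv->klass()->vtable_start()[index].method();  // vtableEntry
//   jump_from_interpreted(method);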
3005 3005
3006 3006
3007 3007 void TemplateTable::invokevirtual(int byte_no) {
3008 3008 transition(vtos, vtos);
3009 3009 prepare_invoke(rbx, noreg, byte_no);
3010 3010
3011 3011 // rbx,: index
3012 3012 // rcx: receiver
3013 3013 // rdx: flags
3014 3014
3015 3015 invokevirtual_helper(rbx, rcx, rdx);
3016 3016 }
3017 3017
3018 3018
3019 3019 void TemplateTable::invokespecial(int byte_no) {
3020 3020 transition(vtos, vtos);
3021 3021 prepare_invoke(rbx, noreg, byte_no);
3022 3022 // do the call
3023 3023 __ verify_oop(rbx);
3024 3024 __ profile_call(rax);
3025 3025 __ jump_from_interpreted(rbx, rax);
3026 3026 }
3027 3027
3028 3028
3029 3029 void TemplateTable::invokestatic(int byte_no) {
3030 3030 transition(vtos, vtos);
3031 3031 prepare_invoke(rbx, noreg, byte_no);
3032 3032 // do the call
3033 3033 __ verify_oop(rbx);
3034 3034 __ profile_call(rax);
3035 3035 __ jump_from_interpreted(rbx, rax);
3036 3036 }
3037 3037
3038 3038
3039 3039 void TemplateTable::fast_invokevfinal(int byte_no) {
3040 3040 transition(vtos, vtos);
3041 3041 __ stop("fast_invokevfinal not used on x86");
3042 3042 }
3043 3043
3044 3044
3045 3045 void TemplateTable::invokeinterface(int byte_no) {
3046 3046 transition(vtos, vtos);
3047 3047 prepare_invoke(rax, rbx, byte_no);
3048 3048
3049 3049   // rax: Interface
3050 3050   // rbx: index
3051 3051 // rcx: receiver
3052 3052 // rdx: flags
3053 3053
3054 3054 // Special case of invokeinterface called for virtual method of
3055 3055 // java.lang.Object. See cpCacheOop.cpp for details.
3056 3056   // This code isn't produced by javac, but could be produced by
3057 3057   // another compliant Java compiler, as sketched below.
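  // For example, a compliant compiler could legally compile a call such as
  //   ((java.lang.CharSequence) x).hashCode()
  // to an invokeinterface naming CharSequence, even though hashCode() is
  // inherited from java.lang.Object. (Hypothetical example; javac emits
  // invokevirtual for this call.)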
3058 3058 Label notMethod;
3059 3059 __ movl(rdi, rdx);
3060 3060 __ andl(rdi, (1 << ConstantPoolCacheEntry::methodInterface));
3061 3061 __ jcc(Assembler::zero, notMethod);
3062 3062
3063 3063 invokevirtual_helper(rbx, rcx, rdx);
3064 3064 __ bind(notMethod);
3065 3065
3066 3066 // Get receiver klass into rdx - also a null check
3067 3067 __ restore_locals(); // restore rdi
3068 3068 __ movptr(rdx, Address(rcx, oopDesc::klass_offset_in_bytes()));
3069 3069 __ verify_oop(rdx);
3070 3070
3071 3071 // profile this call
3072 3072 __ profile_virtual_call(rdx, rsi, rdi);
3073 3073
3074 3074 Label no_such_interface, no_such_method;
3075 3075
3076 3076 __ lookup_interface_method(// inputs: rec. class, interface, itable index
3077 3077 rdx, rax, rbx,
3078 3078 // outputs: method, scan temp. reg
3079 3079 rbx, rsi,
3080 3080 no_such_interface);
3081 3081
3082 3082   // rbx: methodOop to call
3083 3083 // rcx: receiver
3084 3084 // Check for abstract method error
3085 3085 // Note: This should be done more efficiently via a throw_abstract_method_error
3086 3086 // interpreter entry point and a conditional jump to it in case of a null
3087 3087 // method.
3088 3088 __ testptr(rbx, rbx);
3089 3089 __ jcc(Assembler::zero, no_such_method);
3090 3090
3091 3091 // do the call
3092 3092 // rcx: receiver
3093 3093   // rbx: methodOop
3094 3094 __ jump_from_interpreted(rbx, rdx);
3095 3095 __ should_not_reach_here();
3096 3096
3097 3097 // exception handling code follows...
3098 3098 // note: must restore interpreter registers to canonical
3099 3099 // state for exception handling to work correctly!
3100 3100
3101 3101 __ bind(no_such_method);
3102 3102 // throw exception
3103 3103 __ pop(rbx); // pop return address (pushed by prepare_invoke)
3104 3104 __ restore_bcp(); // rsi must be correct for exception handler (was destroyed)
3105 3105 __ restore_locals(); // make sure locals pointer is correct as well (was destroyed)
3106 3106 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_AbstractMethodError));
3107 3107 // the call_VM checks for exception, so we should never return here.
3108 3108 __ should_not_reach_here();
3109 3109
3110 3110 __ bind(no_such_interface);
3111 3111 // throw exception
3112 3112 __ pop(rbx); // pop return address (pushed by prepare_invoke)
3113 3113 __ restore_bcp(); // rsi must be correct for exception handler (was destroyed)
3114 3114 __ restore_locals(); // make sure locals pointer is correct as well (was destroyed)
3115 3115 __ call_VM(noreg, CAST_FROM_FN_PTR(address,
3116 3116 InterpreterRuntime::throw_IncompatibleClassChangeError));
3117 3117 // the call_VM checks for exception, so we should never return here.
3118 3118 __ should_not_reach_here();
3119 3119 }
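// A rough sketch of what lookup_interface_method does (illustrative names,
// not the real VM API):
//
//   for (entry = recv_klass->itable_begin(); ; entry++) {
//     if (entry->interface == NULL) goto no_such_interface;  // end of scan
//     if (entry->interface == resolved_interface) break;
//   }
//   method = entry->method_table[itable_index];
//   if (method == NULL) goto no_such_method;                 // AbstractMethodError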
3120 3120
3121 3121 void TemplateTable::invokedynamic(int byte_no) {
3122 3122 transition(vtos, vtos);
3123 3123
3124 3124 if (!EnableInvokeDynamic) {
3125 3125 // We should not encounter this bytecode if !EnableInvokeDynamic.
3126 3126 // The verifier will stop it. However, if we get past the verifier,
3127 3127 // this will stop the thread in a reasonable way, without crashing the JVM.
3128 3128 __ call_VM(noreg, CAST_FROM_FN_PTR(address,
3129 3129 InterpreterRuntime::throw_IncompatibleClassChangeError));
3130 3130 // the call_VM checks for exception, so we should never return here.
3131 3131 __ should_not_reach_here();
3132 3132 return;
3133 3133 }
3134 3134
3135 3135 prepare_invoke(rax, rbx, byte_no);
3136 3136
3137 3137 // rax: CallSite object (f1)
3138 3138 // rbx: unused (f2)
3139 3139 // rcx: receiver address
3140 3140 // rdx: flags (unused)
3141 3141
3142 3142 if (ProfileInterpreter) {
3143 3143     Label L;               // note: declared but currently unused
3144 3144 // %%% should make a type profile for any invokedynamic that takes a ref argument
3145 3145 // profile this call
3146 3146 __ profile_call(rsi);
3147 3147 }
3148 3148
3149 - Label handle_unlinked_site;
3150 3149 __ movptr(rcx, Address(rax, __ delayed_value(java_dyn_CallSite::target_offset_in_bytes, rcx)));
3151 3150 __ null_check(rcx);
3152 3151 __ prepare_to_jump_from_interpreted();
3153 3152 __ jump_to_method_handle_entry(rcx, rdx);
3154 3153 }
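// In effect, each linked invokedynamic site dispatches through its
// CallSite's current target (hedged pseudocode, illustrative names):
//
//   MethodHandle target = callsite->target();   // callsite came from f1
//   null_check(target);                         // an unlinked site traps here
//   jump_to_method_handle_entry(target);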
3155 3154
3156 3155 //----------------------------------------------------------------------------------------------------
3157 3156 // Allocation
3158 3157
3159 3158 void TemplateTable::_new() {
3160 3159 transition(vtos, atos);
3161 3160 __ get_unsigned_2_byte_index_at_bcp(rdx, 1);
3162 3161 Label slow_case;
3163 3162 Label done;
3164 3163 Label initialize_header;
3165 3164 Label initialize_object; // including clearing the fields
3166 3165 Label allocate_shared;
3167 3166
3168 3167 __ get_cpool_and_tags(rcx, rax);
3169 3168 // get instanceKlass
3170 3169 __ movptr(rcx, Address(rcx, rdx, Address::times_ptr, sizeof(constantPoolOopDesc)));
3171 3170   __ push(rcx);  // save the klass for initializing the object header later
3172 3171
3173 3172   // make sure the class we're about to instantiate has been resolved.
3174 3173   // Note: slow_case does a pop of the stack, which is why we loaded the class and pushed it above
3175 3174 const int tags_offset = typeArrayOopDesc::header_size(T_BYTE) * wordSize;
3176 3175 __ cmpb(Address(rax, rdx, Address::times_1, tags_offset), JVM_CONSTANT_Class);
3177 3176 __ jcc(Assembler::notEqual, slow_case);
3178 3177
3179 3178 // make sure klass is initialized & doesn't have finalizer
3180 3179 // make sure klass is fully initialized
3181 3180 __ cmpl(Address(rcx, instanceKlass::init_state_offset_in_bytes() + sizeof(oopDesc)), instanceKlass::fully_initialized);
3182 3181 __ jcc(Assembler::notEqual, slow_case);
3183 3182
3184 3183 // get instance_size in instanceKlass (scaled to a count of bytes)
3185 3184 __ movl(rdx, Address(rcx, Klass::layout_helper_offset_in_bytes() + sizeof(oopDesc)));
3186 3185 // test to see if it has a finalizer or is malformed in some way
3187 3186 __ testl(rdx, Klass::_lh_instance_slow_path_bit);
3188 3187 __ jcc(Assembler::notZero, slow_case);
3189 3188
3190 3189 //
3191 3190 // Allocate the instance
3192 3191 // 1) Try to allocate in the TLAB
3193 3192 // 2) if fail and the object is large allocate in the shared Eden
3194 3193 // 3) if the above fails (or is not applicable), go to a slow case
3195 3194 // (creates a new TLAB, etc.)
3196 3195
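  // A C-style sketch of the TLAB fast path generated below (illustrative
  // field names; the real accessors live in JavaThread):
  //
  //   obj     = thread->tlab_top;
  //   new_top = obj + instance_size;
  //   if (new_top > thread->tlab_end) goto allocate_shared_or_slow;
  //   thread->tlab_top = new_top;   // no atomics needed: the TLAB is thread-local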
3197 3196 const bool allow_shared_alloc =
3198 3197 Universe::heap()->supports_inline_contig_alloc() && !CMSIncrementalMode;
3199 3198
3200 3199 if (UseTLAB) {
3201 3200 const Register thread = rcx;
3202 3201
3203 3202 __ get_thread(thread);
3204 3203 __ movptr(rax, Address(thread, in_bytes(JavaThread::tlab_top_offset())));
3205 3204 __ lea(rbx, Address(rax, rdx, Address::times_1));
3206 3205 __ cmpptr(rbx, Address(thread, in_bytes(JavaThread::tlab_end_offset())));
3207 3206 __ jcc(Assembler::above, allow_shared_alloc ? allocate_shared : slow_case);
3208 3207 __ movptr(Address(thread, in_bytes(JavaThread::tlab_top_offset())), rbx);
3209 3208 if (ZeroTLAB) {
3210 3209 // the fields have been already cleared
3211 3210 __ jmp(initialize_header);
3212 3211 } else {
3213 3212 // initialize both the header and fields
3214 3213 __ jmp(initialize_object);
3215 3214 }
3216 3215 }
3217 3216
3218 3217 // Allocation in the shared Eden, if allowed.
3219 3218 //
3220 3219 // rdx: instance size in bytes
3221 3220 if (allow_shared_alloc) {
3222 3221 __ bind(allocate_shared);
3223 3222
3224 3223 ExternalAddress heap_top((address)Universe::heap()->top_addr());
3225 3224
3226 3225 Label retry;
3227 3226 __ bind(retry);
3228 3227 __ movptr(rax, heap_top);
3229 3228 __ lea(rbx, Address(rax, rdx, Address::times_1));
3230 3229 __ cmpptr(rbx, ExternalAddress((address)Universe::heap()->end_addr()));
3231 3230 __ jcc(Assembler::above, slow_case);
3232 3231
3233 3232     // Compare rax with the current top addr and, if still equal, store the new
3234 3233     // top addr in rbx at the address of the top addr pointer. Sets ZF if it was
3235 3234     // equal, and clears it otherwise. Uses a lock prefix for atomicity on MPs.
3236 3235     //
3237 3236     // rax: object begin
3238 3237     // rbx: object end
3239 3238     // rdx: instance size in bytes
3240 3239 __ locked_cmpxchgptr(rbx, heap_top);
3241 3240
3242 3241 // if someone beat us on the allocation, try again, otherwise continue
3243 3242 __ jcc(Assembler::notEqual, retry);
3244 3243 }
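  // The retry loop above, in effect (C-style sketch, illustrative names):
  //
  //   do {
  //     obj     = *heap_top;
  //     new_top = obj + instance_size;
  //     if (new_top > heap_end) goto slow_case;
  //   } while (cmpxchg(heap_top, /*expected*/ obj, new_top) != obj);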
3245 3244
3246 3245 if (UseTLAB || Universe::heap()->supports_inline_contig_alloc()) {
3247 3246 // The object is initialized before the header. If the object size is
3248 3247 // zero, go directly to the header initialization.
3249 3248 __ bind(initialize_object);
3250 3249 __ decrement(rdx, sizeof(oopDesc));
3251 3250 __ jcc(Assembler::zero, initialize_header);
3252 3251
3253 3252 // Initialize topmost object field, divide rdx by 8, check if odd and
3254 3253 // test if zero.
3255 3254 __ xorl(rcx, rcx); // use zero reg to clear memory (shorter code)
3256 3255 __ shrl(rdx, LogBytesPerLong); // divide by 2*oopSize and set carry flag if odd
3257 3256
3258 3257 // rdx must have been multiple of 8
3259 3258 #ifdef ASSERT
3260 3259 // make sure rdx was multiple of 8
3261 3260 Label L;
3262 3261     // Ignore the partial flag stall after shrl() since this is a debug VM
3263 3262 __ jccb(Assembler::carryClear, L);
3264 3263 __ stop("object size is not multiple of 2 - adjust this code");
3265 3264 __ bind(L);
3266 3265 // rdx must be > 0, no extra check needed here
3267 3266 #endif
3268 3267
3269 3268 // initialize remaining object fields: rdx was a multiple of 8
3270 3269 { Label loop;
3271 3270 __ bind(loop);
3272 3271 __ movptr(Address(rax, rdx, Address::times_8, sizeof(oopDesc) - 1*oopSize), rcx);
3273 3272 NOT_LP64(__ movptr(Address(rax, rdx, Address::times_8, sizeof(oopDesc) - 2*oopSize), rcx));
3274 3273 __ decrement(rdx);
3275 3274 __ jcc(Assembler::notZero, loop);
3276 3275 }
3277 3276
3278 3277 // initialize object header only.
3279 3278 __ bind(initialize_header);
3280 3279 if (UseBiasedLocking) {
3281 3280 __ pop(rcx); // get saved klass back in the register.
3282 3281 __ movptr(rbx, Address(rcx, Klass::prototype_header_offset_in_bytes() + klassOopDesc::klass_part_offset_in_bytes()));
3283 3282 __ movptr(Address(rax, oopDesc::mark_offset_in_bytes ()), rbx);
3284 3283 } else {
3285 3284 __ movptr(Address(rax, oopDesc::mark_offset_in_bytes ()),
3286 3285 (int32_t)markOopDesc::prototype()); // header
3287 3286 __ pop(rcx); // get saved klass back in the register.
3288 3287 }
3289 3288 __ movptr(Address(rax, oopDesc::klass_offset_in_bytes()), rcx); // klass
3290 3289
3291 3290 {
3292 3291 SkipIfEqual skip_if(_masm, &DTraceAllocProbes, 0);
3293 3292 // Trigger dtrace event for fastpath
3294 3293 __ push(atos);
3295 3294 __ call_VM_leaf(
3296 3295 CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_object_alloc), rax);
3297 3296 __ pop(atos);
3298 3297 }
3299 3298
3300 3299 __ jmp(done);
3301 3300 }
3302 3301
3303 3302 // slow case
3304 3303 __ bind(slow_case);
3305 3304 __ pop(rcx); // restore stack pointer to what it was when we came in.
3306 3305 __ get_constant_pool(rax);
3307 3306 __ get_unsigned_2_byte_index_at_bcp(rdx, 1);
3308 3307 call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::_new), rax, rdx);
3309 3308
3310 3309 // continue
3311 3310 __ bind(done);
3312 3311 }
3313 3312
3314 3313
3315 3314 void TemplateTable::newarray() {
3316 3315 transition(itos, atos);
3317 3316 __ push_i(rax); // make sure everything is on the stack
3318 3317 __ load_unsigned_byte(rdx, at_bcp(1));
3319 3318 call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::newarray), rdx, rax);
3320 3319 __ pop_i(rdx); // discard size
3321 3320 }
3322 3321
3323 3322
3324 3323 void TemplateTable::anewarray() {
3325 3324 transition(itos, atos);
3326 3325 __ get_unsigned_2_byte_index_at_bcp(rdx, 1);
3327 3326 __ get_constant_pool(rcx);
3328 3327 call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::anewarray), rcx, rdx, rax);
3329 3328 }
3330 3329
3331 3330
3332 3331 void TemplateTable::arraylength() {
3333 3332 transition(atos, itos);
3334 3333 __ null_check(rax, arrayOopDesc::length_offset_in_bytes());
3335 3334 __ movl(rax, Address(rax, arrayOopDesc::length_offset_in_bytes()));
3336 3335 }
3337 3336
3338 3337
3339 3338 void TemplateTable::checkcast() {
3340 3339 transition(atos, atos);
3341 3340 Label done, is_null, ok_is_subtype, quicked, resolved;
3342 3341 __ testptr(rax, rax); // Object is in EAX
3343 3342 __ jcc(Assembler::zero, is_null);
3344 3343
3345 3344 // Get cpool & tags index
3346 3345 __ get_cpool_and_tags(rcx, rdx); // ECX=cpool, EDX=tags array
3347 3346 __ get_unsigned_2_byte_index_at_bcp(rbx, 1); // EBX=index
3348 3347 // See if bytecode has already been quicked
3349 3348 __ cmpb(Address(rdx, rbx, Address::times_1, typeArrayOopDesc::header_size(T_BYTE) * wordSize), JVM_CONSTANT_Class);
3350 3349 __ jcc(Assembler::equal, quicked);
3351 3350
3352 3351 __ push(atos);
3353 3352 call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::quicken_io_cc) );
3354 3353 __ pop_ptr(rdx);
3355 3354 __ jmpb(resolved);
3356 3355
3357 3356 // Get superklass in EAX and subklass in EBX
3358 3357 __ bind(quicked);
3359 3358 __ mov(rdx, rax); // Save object in EDX; EAX needed for subtype check
3360 3359 __ movptr(rax, Address(rcx, rbx, Address::times_ptr, sizeof(constantPoolOopDesc)));
3361 3360
3362 3361 __ bind(resolved);
3363 3362 __ movptr(rbx, Address(rdx, oopDesc::klass_offset_in_bytes()));
3364 3363
3365 3364 // Generate subtype check. Blows ECX. Resets EDI. Object in EDX.
3366 3365 // Superklass in EAX. Subklass in EBX.
3367 3366 __ gen_subtype_check( rbx, ok_is_subtype );
3368 3367
3369 3368 // Come here on failure
3370 3369 __ push(rdx);
3371 3370 // object is at TOS
3372 3371 __ jump(ExternalAddress(Interpreter::_throw_ClassCastException_entry));
3373 3372
3374 3373 // Come here on success
3375 3374 __ bind(ok_is_subtype);
3376 3375 __ mov(rax,rdx); // Restore object in EDX
3377 3376
3378 3377 // Collect counts on whether this check-cast sees NULLs a lot or not.
3379 3378 if (ProfileInterpreter) {
3380 3379 __ jmp(done);
3381 3380 __ bind(is_null);
3382 3381 __ profile_null_seen(rcx);
3383 3382 } else {
3384 3383 __ bind(is_null); // same as 'done'
3385 3384 }
3386 3385 __ bind(done);
3387 3386 }
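// checkcast semantics, as a sketch (illustrative names):
//
//   if (obj != NULL && !obj->klass()->is_subtype_of(resolved_klass))
//     throw ClassCastException;
//   // NULL always passes the check; the object stays on TOS either way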
3388 3387
3389 3388
3390 3389 void TemplateTable::instanceof() {
3391 3390 transition(atos, itos);
3392 3391 Label done, is_null, ok_is_subtype, quicked, resolved;
3393 3392 __ testptr(rax, rax);
3394 3393 __ jcc(Assembler::zero, is_null);
3395 3394
3396 3395 // Get cpool & tags index
3397 3396 __ get_cpool_and_tags(rcx, rdx); // ECX=cpool, EDX=tags array
3398 3397 __ get_unsigned_2_byte_index_at_bcp(rbx, 1); // EBX=index
3399 3398 // See if bytecode has already been quicked
3400 3399 __ cmpb(Address(rdx, rbx, Address::times_1, typeArrayOopDesc::header_size(T_BYTE) * wordSize), JVM_CONSTANT_Class);
3401 3400 __ jcc(Assembler::equal, quicked);
3402 3401
3403 3402 __ push(atos);
3404 3403 call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::quicken_io_cc) );
3405 3404 __ pop_ptr(rdx);
3406 3405 __ movptr(rdx, Address(rdx, oopDesc::klass_offset_in_bytes()));
3407 3406 __ jmp(resolved);
3408 3407
3409 3408 // Get superklass in EAX and subklass in EDX
3410 3409 __ bind(quicked);
3411 3410 __ movptr(rdx, Address(rax, oopDesc::klass_offset_in_bytes()));
3412 3411 __ movptr(rax, Address(rcx, rbx, Address::times_ptr, sizeof(constantPoolOopDesc)));
3413 3412
3414 3413 __ bind(resolved);
3415 3414
3416 3415 // Generate subtype check. Blows ECX. Resets EDI.
3417 3416 // Superklass in EAX. Subklass in EDX.
3418 3417 __ gen_subtype_check( rdx, ok_is_subtype );
3419 3418
3420 3419 // Come here on failure
3421 3420 __ xorl(rax,rax);
3422 3421 __ jmpb(done);
3423 3422 // Come here on success
3424 3423 __ bind(ok_is_subtype);
3425 3424 __ movl(rax, 1);
3426 3425
3427 3426 // Collect counts on whether this test sees NULLs a lot or not.
3428 3427 if (ProfileInterpreter) {
3429 3428 __ jmp(done);
3430 3429 __ bind(is_null);
3431 3430 __ profile_null_seen(rcx);
3432 3431 } else {
3433 3432 __ bind(is_null); // same as 'done'
3434 3433 }
3435 3434 __ bind(done);
3436 3435   // rax = 0: obj == NULL or obj is not an instance of the specified klass
3437 3436   // rax = 1: obj != NULL and obj is an instance of the specified klass
3438 3437 }
3439 3438
3440 3439
3441 3440 //----------------------------------------------------------------------------------------------------
3442 3441 // Breakpoints
3443 3442 void TemplateTable::_breakpoint() {
3444 3443
3445 3444   // Note: We get here even if we are single stepping.
3446 3445   // jbug insists on setting breakpoints at every bytecode
3447 3446   // even if we are in single step mode.
3448 3447
3449 3448 transition(vtos, vtos);
3450 3449
3451 3450 // get the unpatched byte code
3452 3451 __ get_method(rcx);
3453 3452 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::get_original_bytecode_at), rcx, rsi);
3454 3453 __ mov(rbx, rax);
3455 3454
3456 3455 // post the breakpoint event
3457 3456 __ get_method(rcx);
3458 3457 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::_breakpoint), rcx, rsi);
3459 3458
3460 3459 // complete the execution of original bytecode
3461 3460 __ dispatch_only_normal(vtos);
3462 3461 }
3463 3462
3464 3463
3465 3464 //----------------------------------------------------------------------------------------------------
3466 3465 // Exceptions
3467 3466
3468 3467 void TemplateTable::athrow() {
3469 3468 transition(atos, vtos);
3470 3469 __ null_check(rax);
3471 3470 __ jump(ExternalAddress(Interpreter::throw_exception_entry()));
3472 3471 }
3473 3472
3474 3473
3475 3474 //----------------------------------------------------------------------------------------------------
3476 3475 // Synchronization
3477 3476 //
3478 3477 // Note: monitorenter & exit are symmetric routines, which is reflected
3479 3478 // in the structure of the assembly code as well
3480 3479 //
3481 3480 // Stack layout:
3482 3481 //
3483 3482 // [expressions ] <--- rsp = expression stack top
3484 3483 // ..
3485 3484 // [expressions ]
3486 3485 // [monitor entry] <--- monitor block top = expression stack bot
3487 3486 // ..
3488 3487 // [monitor entry]
3489 3488 // [frame data ] <--- monitor block bot
3490 3489 // ...
3491 3490 // [saved rbp   ] <--- rbp
3492 3491
3493 3492
3494 3493 void TemplateTable::monitorenter() {
3495 3494 transition(atos, vtos);
3496 3495
3497 3496 // check for NULL object
3498 3497 __ null_check(rax);
3499 3498
3500 3499 const Address monitor_block_top(rbp, frame::interpreter_frame_monitor_block_top_offset * wordSize);
3501 3500 const Address monitor_block_bot(rbp, frame::interpreter_frame_initial_sp_offset * wordSize);
3502 3501 const int entry_size = ( frame::interpreter_frame_monitor_size() * wordSize);
3503 3502 Label allocated;
3504 3503
3505 3504 // initialize entry pointer
3506 3505 __ xorl(rdx, rdx); // points to free slot or NULL
3507 3506
3508 3507 // find a free slot in the monitor block (result in rdx)
3509 3508 { Label entry, loop, exit;
3510 3509 __ movptr(rcx, monitor_block_top); // points to current entry, starting with top-most entry
3511 3510 __ lea(rbx, monitor_block_bot); // points to word before bottom of monitor block
3512 3511 __ jmpb(entry);
3513 3512
3514 3513 __ bind(loop);
3515 3514 __ cmpptr(Address(rcx, BasicObjectLock::obj_offset_in_bytes()), (int32_t)NULL_WORD); // check if current entry is used
3516 3515
3517 3516 // TODO - need new func here - kbt
3518 3517 if (VM_Version::supports_cmov()) {
3519 3518 __ cmov(Assembler::equal, rdx, rcx); // if not used then remember entry in rdx
3520 3519 } else {
3521 3520 Label L;
3522 3521 __ jccb(Assembler::notEqual, L);
3523 3522 __ mov(rdx, rcx); // if not used then remember entry in rdx
3524 3523 __ bind(L);
3525 3524 }
3526 3525 __ cmpptr(rax, Address(rcx, BasicObjectLock::obj_offset_in_bytes())); // check if current entry is for same object
3527 3526 __ jccb(Assembler::equal, exit); // if same object then stop searching
3528 3527 __ addptr(rcx, entry_size); // otherwise advance to next entry
3529 3528 __ bind(entry);
3530 3529 __ cmpptr(rcx, rbx); // check if bottom reached
3531 3530 __ jcc(Assembler::notEqual, loop); // if not at bottom then check this entry
3532 3531 __ bind(exit);
3533 3532 }
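  // The search above, in effect (C-style sketch, illustrative names):
  //
  //   free = NULL;
  //   for (entry = monitor_top; entry != monitor_bot; entry++) {
  //     if (entry->obj == NULL) free = entry;   // remember a free slot
  //     if (entry->obj == obj)  break;          // stop at an entry for the same object
  //   }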
3534 3533
3535 3534 __ testptr(rdx, rdx); // check if a slot has been found
3536 3535 __ jccb(Assembler::notZero, allocated); // if found, continue with that one
3537 3536
3538 3537 // allocate one if there's no free slot
3539 3538 { Label entry, loop;
3540 3539 // 1. compute new pointers // rsp: old expression stack top
3541 3540 __ movptr(rdx, monitor_block_bot); // rdx: old expression stack bottom
3542 3541 __ subptr(rsp, entry_size); // move expression stack top
3543 3542 __ subptr(rdx, entry_size); // move expression stack bottom
3544 3543 __ mov(rcx, rsp); // set start value for copy loop
3545 3544 __ movptr(monitor_block_bot, rdx); // set new monitor block top
3546 3545 __ jmp(entry);
3547 3546 // 2. move expression stack contents
3548 3547 __ bind(loop);
3549 3548 __ movptr(rbx, Address(rcx, entry_size)); // load expression stack word from old location
3550 3549 __ movptr(Address(rcx, 0), rbx); // and store it at new location
3551 3550 __ addptr(rcx, wordSize); // advance to next word
3552 3551 __ bind(entry);
3553 3552 __ cmpptr(rcx, rdx); // check if bottom reached
3554 3553 __ jcc(Assembler::notEqual, loop); // if not at bottom then copy next word
3555 3554 }
3556 3555
3557 3556 // call run-time routine
3558 3557 // rdx: points to monitor entry
3559 3558 __ bind(allocated);
3560 3559
3561 3560   // Increment bcp to point to the next bytecode, so exception handling for async exceptions works correctly.
3562 3561   // The object has already been popped from the stack, so the expression stack looks correct.
3563 3562 __ increment(rsi);
3564 3563
3565 3564 __ movptr(Address(rdx, BasicObjectLock::obj_offset_in_bytes()), rax); // store object
3566 3565 __ lock_object(rdx);
3567 3566
3568 3567 // check to make sure this monitor doesn't cause stack overflow after locking
3569 3568 __ save_bcp(); // in case of exception
3570 3569 __ generate_stack_overflow_check(0);
3571 3570
3572 3571 // The bcp has already been incremented. Just need to dispatch to next instruction.
3573 3572 __ dispatch_next(vtos);
3574 3573 }
3575 3574
3576 3575
3577 3576 void TemplateTable::monitorexit() {
3578 3577 transition(atos, vtos);
3579 3578
3580 3579 // check for NULL object
3581 3580 __ null_check(rax);
3582 3581
3583 3582 const Address monitor_block_top(rbp, frame::interpreter_frame_monitor_block_top_offset * wordSize);
3584 3583 const Address monitor_block_bot(rbp, frame::interpreter_frame_initial_sp_offset * wordSize);
3585 3584 const int entry_size = ( frame::interpreter_frame_monitor_size() * wordSize);
3586 3585 Label found;
3587 3586
3588 3587 // find matching slot
3589 3588 { Label entry, loop;
3590 3589 __ movptr(rdx, monitor_block_top); // points to current entry, starting with top-most entry
3591 3590 __ lea(rbx, monitor_block_bot); // points to word before bottom of monitor block
3592 3591 __ jmpb(entry);
3593 3592
3594 3593 __ bind(loop);
3595 3594 __ cmpptr(rax, Address(rdx, BasicObjectLock::obj_offset_in_bytes())); // check if current entry is for same object
3596 3595 __ jcc(Assembler::equal, found); // if same object then stop searching
3597 3596 __ addptr(rdx, entry_size); // otherwise advance to next entry
3598 3597 __ bind(entry);
3599 3598 __ cmpptr(rdx, rbx); // check if bottom reached
3600 3599 __ jcc(Assembler::notEqual, loop); // if not at bottom then check this entry
3601 3600 }
3602 3601
3603 3602   // Error handling: unlocking was not block-structured
3604 3603 Label end;
3605 3604 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_illegal_monitor_state_exception));
3606 3605 __ should_not_reach_here();
3607 3606
3608 3607 // call run-time routine
3609 3608 // rcx: points to monitor entry
3610 3609 __ bind(found);
3611 3610 __ push_ptr(rax); // make sure object is on stack (contract with oopMaps)
3612 3611 __ unlock_object(rdx);
3613 3612 __ pop_ptr(rax); // discard object
3614 3613 __ bind(end);
3615 3614 }
3616 3615
3617 3616
3618 3617 //----------------------------------------------------------------------------------------------------
3619 3618 // Wide instructions
3620 3619
3621 3620 void TemplateTable::wide() {
3622 3621 transition(vtos, vtos);
3623 3622 __ load_unsigned_byte(rbx, at_bcp(1));
3624 3623 ExternalAddress wtable((address)Interpreter::_wentry_point);
3625 3624 __ jump(ArrayAddress(wtable, Address(noreg, rbx, Address::times_ptr)));
3626 3625 // Note: the rsi increment step is part of the individual wide bytecode implementations
3627 3626 }
3628 3627
3629 3628
3630 3629 //----------------------------------------------------------------------------------------------------
3631 3630 // Multi arrays
3632 3631
3633 3632 void TemplateTable::multianewarray() {
3634 3633 transition(vtos, atos);
3635 3634 __ load_unsigned_byte(rax, at_bcp(3)); // get number of dimensions
3636 3635 // last dim is on top of stack; we want address of first one:
3637 3636   // first_addr = last_addr + (ndims - 1) * stackElementSize - 1*wordSize
3638 3637   // subtracting the final wordSize so we point to the beginning of the array.
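  // For example, with an untagged 32-bit stack (stackElementSize == wordSize
  // == 4) and ndims == 2, the lea below computes rsp + 2*4 - 4 == rsp + 4,
  // i.e. the slot of the first dimension, one word above the last one.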
3639 3638 __ lea( rax, Address(rsp, rax, Interpreter::stackElementScale(), -wordSize));
3640 3639 call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::multianewarray), rax); // pass in rax,
3641 3640 __ load_unsigned_byte(rbx, at_bcp(3));
3642 3641 __ lea(rsp, Address(rsp, rbx, Interpreter::stackElementScale())); // get rid of counts
3643 3642 }
3644 3643
3645 3644 #endif /* !CC_INTERP */