--- old/src/cpu/x86/vm/templateTable_x86_32.cpp
+++ new/src/cpu/x86/vm/templateTable_x86_32.cpp
1 1 /*
2 2 * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
3 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 4 *
5 5 * This code is free software; you can redistribute it and/or modify it
6 6 * under the terms of the GNU General Public License version 2 only, as
7 7 * published by the Free Software Foundation.
8 8 *
9 9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 12 * version 2 for more details (a copy is included in the LICENSE file that
13 13 * accompanied this code).
14 14 *
15 15 * You should have received a copy of the GNU General Public License version
16 16 * 2 along with this work; if not, write to the Free Software Foundation,
17 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 18 *
19 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 20 * or visit www.oracle.com if you need additional information or have any
21 21 * questions.
22 22 *
23 23 */
24 24
25 25 #include "precompiled.hpp"
26 26 #include "interpreter/interpreter.hpp"
27 27 #include "interpreter/interpreterRuntime.hpp"
28 28 #include "interpreter/templateTable.hpp"
29 29 #include "memory/universe.inline.hpp"
30 30 #include "oops/methodDataOop.hpp"
31 31 #include "oops/objArrayKlass.hpp"
32 32 #include "oops/oop.inline.hpp"
33 33 #include "prims/methodHandles.hpp"
34 34 #include "runtime/sharedRuntime.hpp"
35 35 #include "runtime/stubRoutines.hpp"
36 36 #include "runtime/synchronizer.hpp"
37 37
38 38 #ifndef CC_INTERP
39 39 #define __ _masm->
40 40
41 41 //----------------------------------------------------------------------------------------------------
42 42 // Platform-dependent initialization
43 43
44 44 void TemplateTable::pd_initialize() {
45 45 // No i486 specific initialization
46 46 }
47 47
48 48 //----------------------------------------------------------------------------------------------------
49 49 // Address computation
50 50
51 51 // local variables
52 52 static inline Address iaddress(int n) {
53 53 return Address(rdi, Interpreter::local_offset_in_bytes(n));
54 54 }
55 55
56 56 static inline Address laddress(int n) { return iaddress(n + 1); }
57 57 static inline Address haddress(int n) { return iaddress(n + 0); }
58 58 static inline Address faddress(int n) { return iaddress(n); }
59 59 static inline Address daddress(int n) { return laddress(n); }
60 60 static inline Address aaddress(int n) { return iaddress(n); }
61 61
62 62 static inline Address iaddress(Register r) {
63 63 return Address(rdi, r, Interpreter::stackElementScale());
64 64 }
65 65 static inline Address laddress(Register r) {
66 66 return Address(rdi, r, Interpreter::stackElementScale(), Interpreter::local_offset_in_bytes(1));
67 67 }
68 68 static inline Address haddress(Register r) {
69 69 return Address(rdi, r, Interpreter::stackElementScale(), Interpreter::local_offset_in_bytes(0));
70 70 }
71 71
72 72 static inline Address faddress(Register r) { return iaddress(r); }
73 73 static inline Address daddress(Register r) { return laddress(r); }
74 74 static inline Address aaddress(Register r) { return iaddress(r); }
75 75
76 76 // expression stack
77 77 // (Note: Must not use symmetric equivalents at_rsp_m1/2 since they store
78 78 // data beyond the rsp which is potentially unsafe in an MT environment;
79 79 // an interrupt may overwrite that data.)
80 80 static inline Address at_rsp () {
81 81 return Address(rsp, 0);
82 82 }
83 83
84 84 // At top of Java expression stack which may be different from rsp(). It
85 85 // isn't for category 1 objects.
86 86 static inline Address at_tos () {
87 87 Address tos = Address(rsp, Interpreter::expr_offset_in_bytes(0));
88 88 return tos;
89 89 }
90 90
91 91 static inline Address at_tos_p1() {
92 92 return Address(rsp, Interpreter::expr_offset_in_bytes(1));
93 93 }
94 94
95 95 static inline Address at_tos_p2() {
96 96 return Address(rsp, Interpreter::expr_offset_in_bytes(2));
97 97 }
98 98
99 99 // Condition conversion
100 100 static Assembler::Condition j_not(TemplateTable::Condition cc) {
101 101 switch (cc) {
102 102 case TemplateTable::equal : return Assembler::notEqual;
103 103 case TemplateTable::not_equal : return Assembler::equal;
104 104 case TemplateTable::less : return Assembler::greaterEqual;
105 105 case TemplateTable::less_equal : return Assembler::greater;
106 106 case TemplateTable::greater : return Assembler::lessEqual;
107 107 case TemplateTable::greater_equal: return Assembler::less;
108 108 }
109 109 ShouldNotReachHere();
110 110 return Assembler::zero;
111 111 }
112 112
113 113
114 114 //----------------------------------------------------------------------------------------------------
115 115 // Miscellaneous helper routines
116 116
117 117 // Store an oop (or NULL) at the address described by obj.
118 118 // If val == noreg this means store a NULL
119 119
120 120 static void do_oop_store(InterpreterMacroAssembler* _masm,
121 121 Address obj,
122 122 Register val,
123 123 BarrierSet::Name barrier,
124 124 bool precise) {
125 125 assert(val == noreg || val == rax, "parameter is just for looks");
126 126 switch (barrier) {
127 127 #ifndef SERIALGC
128 128 case BarrierSet::G1SATBCT:
129 129 case BarrierSet::G1SATBCTLogging:
130 130 {
131 131 // flatten object address if needed
132 132 // We do it regardless of precise because we need the registers
133 133 if (obj.index() == noreg && obj.disp() == 0) {
134 134 if (obj.base() != rdx) {
135 135 __ movl(rdx, obj.base());
136 136 }
137 137 } else {
138 138 __ leal(rdx, obj);
139 139 }
140 140 __ get_thread(rcx);
141 141 __ save_bcp();
142 142 __ g1_write_barrier_pre(rdx, rcx, rsi, rbx, val != noreg);
143 143
144 144 // Do the actual store
145 145 // noreg means NULL
146 146 if (val == noreg) {
147 147 __ movptr(Address(rdx, 0), NULL_WORD);
148 148 // No post barrier for NULL
149 149 } else {
150 150 __ movl(Address(rdx, 0), val);
151 151 __ g1_write_barrier_post(rdx, rax, rcx, rbx, rsi);
152 152 }
153 153 __ restore_bcp();
154 154
155 155 }
156 156 break;
157 157 #endif // SERIALGC
158 158 case BarrierSet::CardTableModRef:
159 159 case BarrierSet::CardTableExtension:
160 160 {
161 161 if (val == noreg) {
162 162 __ movptr(obj, NULL_WORD);
163 163 } else {
164 164 __ movl(obj, val);
165 165 // flatten object address if needed
166 166 if (!precise || (obj.index() == noreg && obj.disp() == 0)) {
167 167 __ store_check(obj.base());
168 168 } else {
169 169 __ leal(rdx, obj);
170 170 __ store_check(rdx);
171 171 }
172 172 }
173 173 }
174 174 break;
175 175 case BarrierSet::ModRef:
176 176 case BarrierSet::Other:
177 177 if (val == noreg) {
178 178 __ movptr(obj, NULL_WORD);
179 179 } else {
180 180 __ movl(obj, val);
181 181 }
182 182 break;
183 183 default :
184 184 ShouldNotReachHere();
185 185
186 186 }
187 187 }
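
A sketch of the barrier protocol do_oop_store emits, restated as plain C++ (the helper names below are hypothetical stand-ins, not VM calls): G1 needs a pre-barrier that enqueues the old value while concurrent marking is active (SATB) and a post-barrier that updates remembered sets; card-table collectors only dirty a card after the store. In both cases a NULL store needs no post-barrier.

    struct oop;                                                    // opaque heap reference
    inline void g1_pre_barrier(oop* /*old_value*/) {}              // stand-in: SATB enqueue
    inline void g1_post_barrier(oop** /*field*/, oop* /*val*/) {}  // stand-in: remembered-set update

    void do_oop_store_model(oop** field, oop* val) {
      g1_pre_barrier(*field);         // before the store: record the old value (even for NULL stores)
      *field = val;                   // the store itself
      if (val != nullptr) {
        g1_post_barrier(field, val);  // after the store: skipped entirely for NULL
      }
    }
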
188 188
189 189 Address TemplateTable::at_bcp(int offset) {
190 190 assert(_desc->uses_bcp(), "inconsistent uses_bcp information");
191 191 return Address(rsi, offset);
192 192 }
193 193
194 194
195 195 void TemplateTable::patch_bytecode(Bytecodes::Code bytecode, Register bc,
196 196 Register scratch,
197 197 bool load_bc_into_scratch/*=true*/) {
198 198
199 199 if (!RewriteBytecodes) return;
200 200 // the pair bytecodes have already done the load.
201 201 if (load_bc_into_scratch) {
202 202 __ movl(bc, bytecode);
203 203 }
204 204 Label patch_done;
205 205 if (JvmtiExport::can_post_breakpoint()) {
206 206 Label fast_patch;
207 207 // if a breakpoint is present we can't rewrite the stream directly
208 208 __ movzbl(scratch, at_bcp(0));
209 209 __ cmpl(scratch, Bytecodes::_breakpoint);
210 210 __ jcc(Assembler::notEqual, fast_patch);
211 211 __ get_method(scratch);
212 212 // Let breakpoint table handling rewrite to quicker bytecode
213 213 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::set_original_bytecode_at), scratch, rsi, bc);
214 214 #ifndef ASSERT
215 215 __ jmpb(patch_done);
216 216 #else
217 217 __ jmp(patch_done);
218 218 #endif
219 219 __ bind(fast_patch);
220 220 }
221 221 #ifdef ASSERT
222 222 Label okay;
223 223 __ load_unsigned_byte(scratch, at_bcp(0));
224 224 __ cmpl(scratch, (int)Bytecodes::java_code(bytecode));
225 225 __ jccb(Assembler::equal, okay);
226 226 __ cmpl(scratch, bc);
227 227 __ jcc(Assembler::equal, okay);
228 228 __ stop("patching the wrong bytecode");
229 229 __ bind(okay);
230 230 #endif
231 231 // patch bytecode
232 232 __ movb(at_bcp(0), bc);
233 233 __ bind(patch_done);
234 234 }
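
This is HotSpot's bytecode quickening: once the slow path has resolved what the bytecode needs, the instruction in the method's code stream is overwritten in place with a _fast variant, so later executions dispatch straight to the fast template; when a _breakpoint opcode occupies the slot, the runtime records the quickened bytecode in the breakpoint table instead (set_original_bytecode_at above). A minimal sketch of the in-place overwrite (opcode value for the fast form is illustrative, not the VM's):

    #include <cstdint>

    enum Bc : uint8_t { bc_getfield = 0xb4, bc_fast_igetfield = 0xcc }; // 0xcc is illustrative

    void quicken(uint8_t* bcp) {
      // e.g. turn a resolved bc_getfield into its fast form;
      // later passes over this bcp skip resolution entirely
      *bcp = bc_fast_igetfield;
    }
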
235 235
236 236 //----------------------------------------------------------------------------------------------------
237 237 // Individual instructions
238 238
239 239 void TemplateTable::nop() {
240 240 transition(vtos, vtos);
241 241 // nothing to do
242 242 }
243 243
244 244 void TemplateTable::shouldnotreachhere() {
245 245 transition(vtos, vtos);
246 246 __ stop("shouldnotreachhere bytecode");
247 247 }
248 248
249 249
250 250
251 251 void TemplateTable::aconst_null() {
252 252 transition(vtos, atos);
253 253 __ xorptr(rax, rax);
254 254 }
255 255
256 256
257 257 void TemplateTable::iconst(int value) {
258 258 transition(vtos, itos);
259 259 if (value == 0) {
260 260 __ xorptr(rax, rax);
261 261 } else {
262 262 __ movptr(rax, value);
263 263 }
264 264 }
265 265
266 266
267 267 void TemplateTable::lconst(int value) {
268 268 transition(vtos, ltos);
269 269 if (value == 0) {
270 270 __ xorptr(rax, rax);
271 271 } else {
272 272 __ movptr(rax, value);
273 273 }
274 274 assert(value >= 0, "check this code");
275 275 __ xorptr(rdx, rdx);
276 276 }
277 277
278 278
279 279 void TemplateTable::fconst(int value) {
280 280 transition(vtos, ftos);
281 281 if (value == 0) { __ fldz();
282 282 } else if (value == 1) { __ fld1();
283 283 } else if (value == 2) { __ fld1(); __ fld1(); __ faddp(); // should find a better solution here
284 284 } else { ShouldNotReachHere();
285 285 }
286 286 }
287 287
288 288
289 289 void TemplateTable::dconst(int value) {
290 290 transition(vtos, dtos);
291 291 if (value == 0) { __ fldz();
292 292 } else if (value == 1) { __ fld1();
293 293 } else { ShouldNotReachHere();
294 294 }
295 295 }
296 296
297 297
298 298 void TemplateTable::bipush() {
299 299 transition(vtos, itos);
300 300 __ load_signed_byte(rax, at_bcp(1));
301 301 }
302 302
303 303
304 304 void TemplateTable::sipush() {
305 305 transition(vtos, itos);
306 306 __ load_unsigned_short(rax, at_bcp(1));
307 307 __ bswapl(rax);
308 308 __ sarl(rax, 16);
309 309 }
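
The bswapl/sarl pair decodes the big-endian 16-bit operand: the unsigned 16-bit load picks up the two operand bytes in little-endian order, the byte swap moves them, big-endian-corrected, into the high half of the register, and the arithmetic shift brings them back down sign-extended. A standalone sketch (assumes the GCC/Clang __builtin_bswap32 builtin):

    #include <cassert>
    #include <cstdint>

    int32_t sipush_operand(const uint8_t* bcp) {
      uint32_t v = (uint32_t)bcp[1] | ((uint32_t)bcp[2] << 8); // load_unsigned_short (little-endian)
      v = __builtin_bswap32(v);                                // bswapl
      return (int32_t)v >> 16;                                 // sarl 16: sign-extend the short
    }

    int main() {
      uint8_t bc[3] = { 0x11, 0xFF, 0xFE };  // sipush with big-endian operand 0xFFFE == -2
      assert(sipush_operand(bc) == -2);
    }
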
310 310
311 311 void TemplateTable::ldc(bool wide) {
312 312 transition(vtos, vtos);
313 313 Label call_ldc, notFloat, notClass, Done;
314 314
315 315 if (wide) {
316 316 __ get_unsigned_2_byte_index_at_bcp(rbx, 1);
317 317 } else {
318 318 __ load_unsigned_byte(rbx, at_bcp(1));
319 319 }
320 320 __ get_cpool_and_tags(rcx, rax);
321 321 const int base_offset = constantPoolOopDesc::header_size() * wordSize;
322 322 const int tags_offset = typeArrayOopDesc::header_size(T_BYTE) * wordSize;
323 323
324 324 // get type
325 325 __ xorptr(rdx, rdx);
326 326 __ movb(rdx, Address(rax, rbx, Address::times_1, tags_offset));
327 327
328 328 // unresolved string - get the resolved string
329 329 __ cmpl(rdx, JVM_CONSTANT_UnresolvedString);
330 330 __ jccb(Assembler::equal, call_ldc);
331 331
332 332 // unresolved class - get the resolved class
333 333 __ cmpl(rdx, JVM_CONSTANT_UnresolvedClass);
334 334 __ jccb(Assembler::equal, call_ldc);
335 335
336 336 // unresolved class in error (resolution failed) - call into runtime
337 337 // so that the same error from first resolution attempt is thrown.
338 338 __ cmpl(rdx, JVM_CONSTANT_UnresolvedClassInError);
339 339 __ jccb(Assembler::equal, call_ldc);
340 340
341 341 // resolved class - need to call vm to get java mirror of the class
342 342 __ cmpl(rdx, JVM_CONSTANT_Class);
343 343 __ jcc(Assembler::notEqual, notClass);
344 344
345 345 __ bind(call_ldc);
346 346 __ movl(rcx, wide);
347 347 call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::ldc), rcx);
348 348 __ push(atos);
349 349 __ jmp(Done);
350 350
351 351 __ bind(notClass);
352 352 __ cmpl(rdx, JVM_CONSTANT_Float);
353 353 __ jccb(Assembler::notEqual, notFloat);
354 354 // ftos
355 355 __ fld_s( Address(rcx, rbx, Address::times_ptr, base_offset));
356 356 __ push(ftos);
357 357 __ jmp(Done);
358 358
359 359 __ bind(notFloat);
360 360 #ifdef ASSERT
361 361 { Label L;
362 362 __ cmpl(rdx, JVM_CONSTANT_Integer);
363 363 __ jcc(Assembler::equal, L);
364 364 __ cmpl(rdx, JVM_CONSTANT_String);
365 365 __ jcc(Assembler::equal, L);
366 366 __ stop("unexpected tag type in ldc");
367 367 __ bind(L);
368 368 }
369 369 #endif
370 370 Label isOop;
371 371 // atos and itos
372 372 // String is only oop type we will see here
373 373 __ cmpl(rdx, JVM_CONSTANT_String);
374 374 __ jccb(Assembler::equal, isOop);
375 375 __ movl(rax, Address(rcx, rbx, Address::times_ptr, base_offset));
376 376 __ push(itos);
377 377 __ jmp(Done);
378 378 __ bind(isOop);
379 379 __ movptr(rax, Address(rcx, rbx, Address::times_ptr, base_offset));
380 380 __ push(atos);
381 381
382 382 if (VerifyOops) {
383 383 __ verify_oop(rax);
384 384 }
385 385 __ bind(Done);
386 386 }
387 387
388 388 // Fast path for caching oop constants.
389 389 // %%% We should use this to handle Class and String constants also.
390 390 // %%% It will simplify the ldc/primitive path considerably.
391 391 void TemplateTable::fast_aldc(bool wide) {
392 392 transition(vtos, atos);
393 393
394 - if (!EnableMethodHandles) {
395 - // We should not encounter this bytecode if !EnableMethodHandles.
394 + if (!EnableInvokeDynamic) {
395 + // We should not encounter this bytecode if !EnableInvokeDynamic.
396 396 // The verifier will stop it. However, if we get past the verifier,
397 397 // this will stop the thread in a reasonable way, without crashing the JVM.
398 398 __ call_VM(noreg, CAST_FROM_FN_PTR(address,
399 399 InterpreterRuntime::throw_IncompatibleClassChangeError));
400 400 // the call_VM checks for exception, so we should never return here.
401 401 __ should_not_reach_here();
402 402 return;
403 403 }
404 404
405 405 const Register cache = rcx;
406 406 const Register index = rdx;
407 407
408 408 resolve_cache_and_index(f1_oop, rax, cache, index, wide ? sizeof(u2) : sizeof(u1));
409 409 if (VerifyOops) {
410 410 __ verify_oop(rax);
411 411 }
412 412
413 413 Label L_done, L_throw_exception;
414 414 const Register con_klass_temp = rcx; // same as Rcache
415 415 __ movptr(con_klass_temp, Address(rax, oopDesc::klass_offset_in_bytes()));
416 416 __ cmpptr(con_klass_temp, ExternalAddress((address)Universe::systemObjArrayKlassObj_addr()));
417 417 __ jcc(Assembler::notEqual, L_done);
418 418 __ cmpl(Address(rax, arrayOopDesc::length_offset_in_bytes()), 0);
419 419 __ jcc(Assembler::notEqual, L_throw_exception);
420 420 __ xorptr(rax, rax);
421 421 __ jmp(L_done);
422 422
423 423 // Load the exception from the system-array which wraps it:
424 424 __ bind(L_throw_exception);
425 425 __ movptr(rax, Address(rax, arrayOopDesc::base_offset_in_bytes(T_OBJECT)));
426 426 __ jump(ExternalAddress(Interpreter::throw_exception_entry()));
427 427
428 428 __ bind(L_done);
429 429 }
430 430
431 431 void TemplateTable::ldc2_w() {
432 432 transition(vtos, vtos);
433 433 Label Long, Done;
434 434 __ get_unsigned_2_byte_index_at_bcp(rbx, 1);
435 435
436 436 __ get_cpool_and_tags(rcx, rax);
437 437 const int base_offset = constantPoolOopDesc::header_size() * wordSize;
438 438 const int tags_offset = typeArrayOopDesc::header_size(T_BYTE) * wordSize;
439 439
440 440 // get type
441 441 __ cmpb(Address(rax, rbx, Address::times_1, tags_offset), JVM_CONSTANT_Double);
442 442 __ jccb(Assembler::notEqual, Long);
443 443 // dtos
444 444 __ fld_d( Address(rcx, rbx, Address::times_ptr, base_offset));
445 445 __ push(dtos);
446 446 __ jmpb(Done);
447 447
448 448 __ bind(Long);
449 449 // ltos
450 450 __ movptr(rax, Address(rcx, rbx, Address::times_ptr, base_offset + 0 * wordSize));
451 451 NOT_LP64(__ movptr(rdx, Address(rcx, rbx, Address::times_ptr, base_offset + 1 * wordSize)));
452 452
453 453 __ push(ltos);
454 454
455 455 __ bind(Done);
456 456 }
457 457
458 458
459 459 void TemplateTable::locals_index(Register reg, int offset) {
460 460 __ load_unsigned_byte(reg, at_bcp(offset));
461 461 __ negptr(reg);
462 462 }
463 463
464 464
465 465 void TemplateTable::iload() {
466 466 transition(vtos, itos);
467 467 if (RewriteFrequentPairs) {
468 468 Label rewrite, done;
469 469
470 470 // get next byte
471 471 __ load_unsigned_byte(rbx, at_bcp(Bytecodes::length_for(Bytecodes::_iload)));
472 472 // if _iload, wait to rewrite to iload2. We only want to rewrite the
473 473 // last two iloads in a pair. Comparing against fast_iload means that
474 474 // the next bytecode is neither an iload nor a caload, and therefore
475 475 // an iload pair.
476 476 __ cmpl(rbx, Bytecodes::_iload);
477 477 __ jcc(Assembler::equal, done);
478 478
479 479 __ cmpl(rbx, Bytecodes::_fast_iload);
480 480 __ movl(rcx, Bytecodes::_fast_iload2);
481 481 __ jccb(Assembler::equal, rewrite);
482 482
483 483 // if _caload, rewrite to fast_icaload
484 484 __ cmpl(rbx, Bytecodes::_caload);
485 485 __ movl(rcx, Bytecodes::_fast_icaload);
486 486 __ jccb(Assembler::equal, rewrite);
487 487
488 488 // rewrite so iload doesn't check again.
489 489 __ movl(rcx, Bytecodes::_fast_iload);
490 490
491 491 // rewrite
492 492 // rcx: fast bytecode
493 493 __ bind(rewrite);
494 494 patch_bytecode(Bytecodes::_iload, rcx, rbx, false);
495 495 __ bind(done);
496 496 }
497 497
498 498 // Get the local value into tos
499 499 locals_index(rbx);
500 500 __ movl(rax, iaddress(rbx));
501 501 }
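
The rewrite choice above, restated as a decision table (a sketch using the VM's Bytecodes names; this function does not exist in the sources):

    // which bytecode _iload is patched to, given the following bytecode
    Bytecodes::Code rewritten_iload(Bytecodes::Code next) {
      switch (next) {
        case Bytecodes::_iload:      return Bytecodes::_iload;        // wait; the next iload does the rewrite
        case Bytecodes::_fast_iload: return Bytecodes::_fast_iload2;  // last two iloads of a pair
        case Bytecodes::_caload:     return Bytecodes::_fast_icaload; // iload ; caload pair
        default:                     return Bytecodes::_fast_iload;   // no pair: plain fast iload
      }
    }
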
502 502
503 503
504 504 void TemplateTable::fast_iload2() {
505 505 transition(vtos, itos);
506 506 locals_index(rbx);
507 507 __ movl(rax, iaddress(rbx));
508 508 __ push(itos);
509 509 locals_index(rbx, 3);
510 510 __ movl(rax, iaddress(rbx));
511 511 }
512 512
513 513 void TemplateTable::fast_iload() {
514 514 transition(vtos, itos);
515 515 locals_index(rbx);
516 516 __ movl(rax, iaddress(rbx));
517 517 }
518 518
519 519
520 520 void TemplateTable::lload() {
521 521 transition(vtos, ltos);
522 522 locals_index(rbx);
523 523 __ movptr(rax, laddress(rbx));
524 524 NOT_LP64(__ movl(rdx, haddress(rbx)));
525 525 }
526 526
527 527
528 528 void TemplateTable::fload() {
529 529 transition(vtos, ftos);
530 530 locals_index(rbx);
531 531 __ fld_s(faddress(rbx));
532 532 }
533 533
534 534
535 535 void TemplateTable::dload() {
536 536 transition(vtos, dtos);
537 537 locals_index(rbx);
538 538 __ fld_d(daddress(rbx));
539 539 }
540 540
541 541
542 542 void TemplateTable::aload() {
543 543 transition(vtos, atos);
544 544 locals_index(rbx);
545 545 __ movptr(rax, aaddress(rbx));
546 546 }
547 547
548 548
549 549 void TemplateTable::locals_index_wide(Register reg) {
550 550 __ movl(reg, at_bcp(2));
551 551 __ bswapl(reg);
552 552 __ shrl(reg, 16);
553 553 __ negptr(reg);
554 554 }
555 555
556 556
557 557 void TemplateTable::wide_iload() {
558 558 transition(vtos, itos);
559 559 locals_index_wide(rbx);
560 560 __ movl(rax, iaddress(rbx));
561 561 }
562 562
563 563
564 564 void TemplateTable::wide_lload() {
565 565 transition(vtos, ltos);
566 566 locals_index_wide(rbx);
567 567 __ movptr(rax, laddress(rbx));
568 568 NOT_LP64(__ movl(rdx, haddress(rbx)));
569 569 }
570 570
571 571
572 572 void TemplateTable::wide_fload() {
573 573 transition(vtos, ftos);
574 574 locals_index_wide(rbx);
575 575 __ fld_s(faddress(rbx));
576 576 }
577 577
578 578
579 579 void TemplateTable::wide_dload() {
580 580 transition(vtos, dtos);
581 581 locals_index_wide(rbx);
582 582 __ fld_d(daddress(rbx));
583 583 }
584 584
585 585
586 586 void TemplateTable::wide_aload() {
587 587 transition(vtos, atos);
588 588 locals_index_wide(rbx);
589 589 __ movptr(rax, aaddress(rbx));
590 590 }
591 591
592 592 void TemplateTable::index_check(Register array, Register index) {
593 593 // Pop ptr into array
594 594 __ pop_ptr(array);
595 595 index_check_without_pop(array, index);
596 596 }
597 597
598 598 void TemplateTable::index_check_without_pop(Register array, Register index) {
599 599 // destroys rbx,
600 600 // check array
601 601 __ null_check(array, arrayOopDesc::length_offset_in_bytes());
602 602 LP64_ONLY(__ movslq(index, index));
603 603 // check index
604 604 __ cmpl(index, Address(array, arrayOopDesc::length_offset_in_bytes()));
605 605 if (index != rbx) {
606 606 // ??? convention: move aberrant index into rbx, for exception message
607 607 assert(rbx != array, "different registers");
608 608 __ mov(rbx, index);
609 609 }
610 610 __ jump_cc(Assembler::aboveEqual,
611 611 ExternalAddress(Interpreter::_throw_ArrayIndexOutOfBoundsException_entry));
612 612 }
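
jump_cc(aboveEqual) is an unsigned comparison, which is what lets a single branch cover both bounds: a negative index, reinterpreted as unsigned, is larger than any possible array length. The equivalent C++ (a sketch):

    #include <cstdint>

    // one unsigned compare subsumes (index < 0 || index >= length)
    inline bool in_bounds(int32_t index, int32_t length) {
      return (uint32_t)index < (uint32_t)length;
    }
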
613 613
614 614
615 615 void TemplateTable::iaload() {
616 616 transition(itos, itos);
617 617 // rdx: array
618 618 index_check(rdx, rax); // kills rbx,
619 619 // rax,: index
620 620 __ movl(rax, Address(rdx, rax, Address::times_4, arrayOopDesc::base_offset_in_bytes(T_INT)));
621 621 }
622 622
623 623
624 624 void TemplateTable::laload() {
625 625 transition(itos, ltos);
626 626 // rax,: index
627 627 // rdx: array
628 628 index_check(rdx, rax);
629 629 __ mov(rbx, rax);
630 630 // rbx,: index
631 631 __ movptr(rax, Address(rdx, rbx, Address::times_8, arrayOopDesc::base_offset_in_bytes(T_LONG) + 0 * wordSize));
632 632 NOT_LP64(__ movl(rdx, Address(rdx, rbx, Address::times_8, arrayOopDesc::base_offset_in_bytes(T_LONG) + 1 * wordSize)));
633 633 }
634 634
635 635
636 636 void TemplateTable::faload() {
637 637 transition(itos, ftos);
638 638 // rdx: array
639 639 index_check(rdx, rax); // kills rbx,
640 640 // rax,: index
641 641 __ fld_s(Address(rdx, rax, Address::times_4, arrayOopDesc::base_offset_in_bytes(T_FLOAT)));
642 642 }
643 643
644 644
645 645 void TemplateTable::daload() {
646 646 transition(itos, dtos);
647 647 // rdx: array
648 648 index_check(rdx, rax); // kills rbx,
649 649 // rax,: index
650 650 __ fld_d(Address(rdx, rax, Address::times_8, arrayOopDesc::base_offset_in_bytes(T_DOUBLE)));
651 651 }
652 652
653 653
654 654 void TemplateTable::aaload() {
655 655 transition(itos, atos);
656 656 // rdx: array
657 657 index_check(rdx, rax); // kills rbx,
658 658 // rax,: index
659 659 __ movptr(rax, Address(rdx, rax, Address::times_ptr, arrayOopDesc::base_offset_in_bytes(T_OBJECT)));
660 660 }
661 661
662 662
663 663 void TemplateTable::baload() {
664 664 transition(itos, itos);
665 665 // rdx: array
666 666 index_check(rdx, rax); // kills rbx,
667 667 // rax,: index
668 668 // can do better code for P5 - fix this at some point
669 669 __ load_signed_byte(rbx, Address(rdx, rax, Address::times_1, arrayOopDesc::base_offset_in_bytes(T_BYTE)));
670 670 __ mov(rax, rbx);
671 671 }
672 672
673 673
674 674 void TemplateTable::caload() {
675 675 transition(itos, itos);
676 676 // rdx: array
677 677 index_check(rdx, rax); // kills rbx,
678 678 // rax,: index
679 679 // can do better code for P5 - may want to improve this at some point
680 680 __ load_unsigned_short(rbx, Address(rdx, rax, Address::times_2, arrayOopDesc::base_offset_in_bytes(T_CHAR)));
681 681 __ mov(rax, rbx);
682 682 }
683 683
684 684 // iload followed by caload frequent pair
685 685 void TemplateTable::fast_icaload() {
686 686 transition(vtos, itos);
687 687 // load index out of locals
688 688 locals_index(rbx);
689 689 __ movl(rax, iaddress(rbx));
690 690
691 691 // rdx: array
692 692 index_check(rdx, rax);
693 693 // rax,: index
694 694 __ load_unsigned_short(rbx, Address(rdx, rax, Address::times_2, arrayOopDesc::base_offset_in_bytes(T_CHAR)));
695 695 __ mov(rax, rbx);
696 696 }
697 697
698 698 void TemplateTable::saload() {
699 699 transition(itos, itos);
700 700 // rdx: array
701 701 index_check(rdx, rax); // kills rbx,
702 702 // rax,: index
703 703 // can do better code for P5 - may want to improve this at some point
704 704 __ load_signed_short(rbx, Address(rdx, rax, Address::times_2, arrayOopDesc::base_offset_in_bytes(T_SHORT)));
705 705 __ mov(rax, rbx);
706 706 }
707 707
708 708
709 709 void TemplateTable::iload(int n) {
710 710 transition(vtos, itos);
711 711 __ movl(rax, iaddress(n));
712 712 }
713 713
714 714
715 715 void TemplateTable::lload(int n) {
716 716 transition(vtos, ltos);
717 717 __ movptr(rax, laddress(n));
718 718 NOT_LP64(__ movptr(rdx, haddress(n)));
719 719 }
720 720
721 721
722 722 void TemplateTable::fload(int n) {
723 723 transition(vtos, ftos);
724 724 __ fld_s(faddress(n));
725 725 }
726 726
727 727
728 728 void TemplateTable::dload(int n) {
729 729 transition(vtos, dtos);
730 730 __ fld_d(daddress(n));
731 731 }
732 732
733 733
734 734 void TemplateTable::aload(int n) {
735 735 transition(vtos, atos);
736 736 __ movptr(rax, aaddress(n));
737 737 }
738 738
739 739
740 740 void TemplateTable::aload_0() {
741 741 transition(vtos, atos);
742 742 // According to bytecode histograms, the pairs:
743 743 //
744 744 // _aload_0, _fast_igetfield
745 745 // _aload_0, _fast_agetfield
746 746 // _aload_0, _fast_fgetfield
747 747 //
748 748 // occur frequently. If RewriteFrequentPairs is set, the (slow) _aload_0
749 749 // bytecode checks if the next bytecode is either _fast_igetfield,
750 750 // _fast_agetfield or _fast_fgetfield and then rewrites the
751 751 // current bytecode into a pair bytecode; otherwise it rewrites the current
752 752 // bytecode into _fast_aload_0 that doesn't do the pair check anymore.
753 753 //
754 754 // Note: If the next bytecode is _getfield, the rewrite must be delayed,
755 755 // otherwise we may miss an opportunity for a pair.
756 756 //
757 757 // Also rewrite frequent pairs
758 758 // aload_0, aload_1
759 759 // aload_0, iload_1
760 760 // These bytecodes, which need only a small amount of code, are the most profitable to rewrite
761 761 if (RewriteFrequentPairs) {
762 762 Label rewrite, done;
763 763 // get next byte
764 764 __ load_unsigned_byte(rbx, at_bcp(Bytecodes::length_for(Bytecodes::_aload_0)));
765 765
766 766 // do actual aload_0
767 767 aload(0);
768 768
769 769 // if _getfield then wait with rewrite
770 770 __ cmpl(rbx, Bytecodes::_getfield);
771 771 __ jcc(Assembler::equal, done);
772 772
773 773 // if _igetfield then rewrite to _fast_iaccess_0
774 774 assert(Bytecodes::java_code(Bytecodes::_fast_iaccess_0) == Bytecodes::_aload_0, "fix bytecode definition");
775 775 __ cmpl(rbx, Bytecodes::_fast_igetfield);
776 776 __ movl(rcx, Bytecodes::_fast_iaccess_0);
777 777 __ jccb(Assembler::equal, rewrite);
778 778
779 779 // if _agetfield then rewrite to _fast_aaccess_0
780 780 assert(Bytecodes::java_code(Bytecodes::_fast_aaccess_0) == Bytecodes::_aload_0, "fix bytecode definition");
781 781 __ cmpl(rbx, Bytecodes::_fast_agetfield);
782 782 __ movl(rcx, Bytecodes::_fast_aaccess_0);
783 783 __ jccb(Assembler::equal, rewrite);
784 784
785 785 // if _fgetfield then rewrite to _fast_faccess_0
786 786 assert(Bytecodes::java_code(Bytecodes::_fast_faccess_0) == Bytecodes::_aload_0, "fix bytecode definition");
787 787 __ cmpl(rbx, Bytecodes::_fast_fgetfield);
788 788 __ movl(rcx, Bytecodes::_fast_faccess_0);
789 789 __ jccb(Assembler::equal, rewrite);
790 790
791 791 // else rewrite to _fast_aload0
792 792 assert(Bytecodes::java_code(Bytecodes::_fast_aload_0) == Bytecodes::_aload_0, "fix bytecode definition");
793 793 __ movl(rcx, Bytecodes::_fast_aload_0);
794 794
795 795 // rewrite
796 796 // rcx: fast bytecode
797 797 __ bind(rewrite);
798 798 patch_bytecode(Bytecodes::_aload_0, rcx, rbx, false);
799 799
800 800 __ bind(done);
801 801 } else {
802 802 aload(0);
803 803 }
804 804 }
805 805
806 806 void TemplateTable::istore() {
807 807 transition(itos, vtos);
808 808 locals_index(rbx);
809 809 __ movl(iaddress(rbx), rax);
810 810 }
811 811
812 812
813 813 void TemplateTable::lstore() {
814 814 transition(ltos, vtos);
815 815 locals_index(rbx);
816 816 __ movptr(laddress(rbx), rax);
817 817 NOT_LP64(__ movptr(haddress(rbx), rdx));
818 818 }
819 819
820 820
821 821 void TemplateTable::fstore() {
822 822 transition(ftos, vtos);
823 823 locals_index(rbx);
824 824 __ fstp_s(faddress(rbx));
825 825 }
826 826
827 827
828 828 void TemplateTable::dstore() {
829 829 transition(dtos, vtos);
830 830 locals_index(rbx);
831 831 __ fstp_d(daddress(rbx));
832 832 }
833 833
834 834
835 835 void TemplateTable::astore() {
836 836 transition(vtos, vtos);
837 837 __ pop_ptr(rax);
838 838 locals_index(rbx);
839 839 __ movptr(aaddress(rbx), rax);
840 840 }
841 841
842 842
843 843 void TemplateTable::wide_istore() {
844 844 transition(vtos, vtos);
845 845 __ pop_i(rax);
846 846 locals_index_wide(rbx);
847 847 __ movl(iaddress(rbx), rax);
848 848 }
849 849
850 850
851 851 void TemplateTable::wide_lstore() {
852 852 transition(vtos, vtos);
853 853 __ pop_l(rax, rdx);
854 854 locals_index_wide(rbx);
855 855 __ movptr(laddress(rbx), rax);
856 856 NOT_LP64(__ movl(haddress(rbx), rdx));
857 857 }
858 858
859 859
860 860 void TemplateTable::wide_fstore() {
861 861 wide_istore();
862 862 }
863 863
864 864
865 865 void TemplateTable::wide_dstore() {
866 866 wide_lstore();
867 867 }
868 868
869 869
870 870 void TemplateTable::wide_astore() {
871 871 transition(vtos, vtos);
872 872 __ pop_ptr(rax);
873 873 locals_index_wide(rbx);
874 874 __ movptr(aaddress(rbx), rax);
875 875 }
876 876
877 877
878 878 void TemplateTable::iastore() {
879 879 transition(itos, vtos);
880 880 __ pop_i(rbx);
881 881 // rax,: value
882 882 // rdx: array
883 883 index_check(rdx, rbx); // prefer index in rbx,
884 884 // rbx,: index
885 885 __ movl(Address(rdx, rbx, Address::times_4, arrayOopDesc::base_offset_in_bytes(T_INT)), rax);
886 886 }
887 887
888 888
889 889 void TemplateTable::lastore() {
890 890 transition(ltos, vtos);
891 891 __ pop_i(rbx);
892 892 // rax,: low(value)
893 893 // rcx: array
894 894 // rdx: high(value)
895 895 index_check(rcx, rbx); // prefer index in rbx,
896 896 // rbx,: index
897 897 __ movptr(Address(rcx, rbx, Address::times_8, arrayOopDesc::base_offset_in_bytes(T_LONG) + 0 * wordSize), rax);
898 898 NOT_LP64(__ movl(Address(rcx, rbx, Address::times_8, arrayOopDesc::base_offset_in_bytes(T_LONG) + 1 * wordSize), rdx));
899 899 }
900 900
901 901
902 902 void TemplateTable::fastore() {
903 903 transition(ftos, vtos);
904 904 __ pop_i(rbx);
905 905 // rdx: array
906 906 // st0: value
907 907 index_check(rdx, rbx); // prefer index in rbx,
908 908 // rbx,: index
909 909 __ fstp_s(Address(rdx, rbx, Address::times_4, arrayOopDesc::base_offset_in_bytes(T_FLOAT)));
910 910 }
911 911
912 912
913 913 void TemplateTable::dastore() {
914 914 transition(dtos, vtos);
915 915 __ pop_i(rbx);
916 916 // rdx: array
917 917 // st0: value
918 918 index_check(rdx, rbx); // prefer index in rbx,
919 919 // rbx,: index
920 920 __ fstp_d(Address(rdx, rbx, Address::times_8, arrayOopDesc::base_offset_in_bytes(T_DOUBLE)));
921 921 }
922 922
923 923
924 924 void TemplateTable::aastore() {
925 925 Label is_null, ok_is_subtype, done;
926 926 transition(vtos, vtos);
927 927 // stack: ..., array, index, value
928 928 __ movptr(rax, at_tos()); // Value
929 929 __ movl(rcx, at_tos_p1()); // Index
930 930 __ movptr(rdx, at_tos_p2()); // Array
931 931
932 932 Address element_address(rdx, rcx, Address::times_4, arrayOopDesc::base_offset_in_bytes(T_OBJECT));
933 933 index_check_without_pop(rdx, rcx); // kills rbx,
934 934 // do array store check - check for NULL value first
935 935 __ testptr(rax, rax);
936 936 __ jcc(Assembler::zero, is_null);
937 937
938 938 // Move subklass into EBX
939 939 __ movptr(rbx, Address(rax, oopDesc::klass_offset_in_bytes()));
940 940 // Move superklass into EAX
941 941 __ movptr(rax, Address(rdx, oopDesc::klass_offset_in_bytes()));
942 942 __ movptr(rax, Address(rax, sizeof(oopDesc) + objArrayKlass::element_klass_offset_in_bytes()));
943 943 // Compress array+index*wordSize+12 into a single register. Frees ECX.
944 944 __ lea(rdx, element_address);
945 945
946 946 // Generate subtype check. Blows ECX. Resets EDI to locals.
947 947 // Superklass in EAX. Subklass in EBX.
948 948 __ gen_subtype_check( rbx, ok_is_subtype );
949 949
950 950 // Come here on failure
951 951 // object is at TOS
952 952 __ jump(ExternalAddress(Interpreter::_throw_ArrayStoreException_entry));
953 953
954 954 // Come here on success
955 955 __ bind(ok_is_subtype);
956 956
957 957 // Get the value to store
958 958 __ movptr(rax, at_rsp());
959 959 // and store it with appropriate barrier
960 960 do_oop_store(_masm, Address(rdx, 0), rax, _bs->kind(), true);
961 961
962 962 __ jmp(done);
963 963
964 964 // Have a NULL in EAX, EDX=array, ECX=index. Store NULL at ary[idx]
965 965 __ bind(is_null);
966 966 __ profile_null_seen(rbx);
967 967
968 968 // Store NULL, (noreg means NULL to do_oop_store)
969 969 do_oop_store(_masm, element_address, noreg, _bs->kind(), true);
970 970
971 971 // Pop stack arguments
972 972 __ bind(done);
973 973 __ addptr(rsp, 3 * Interpreter::stackElementSize);
974 974 }
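
The generated store check, as a compact C++ model (all types below are hypothetical stand-ins with bodies elided; the actual element write goes through do_oop_store with GC barriers): a null value skips the subtype check entirely, and a non-null value must be an instance of the array's element class or ArrayStoreException is thrown.

    struct Klass { bool is_subtype_of(const Klass* k) const; };  // stand-in
    struct Object { Klass* klass; };                             // stand-in
    struct ObjArray { int32_t length; Klass* element_klass; Object** elems; };
    void throw_array_store_exception();                          // stand-in

    void aastore_model(ObjArray* a, int32_t i, Object* v) {
      // index_check_without_pop has already validated 0 <= i < a->length
      if (v != nullptr && !v->klass->is_subtype_of(a->element_klass)) {
        throw_array_store_exception();
        return;
      }
      a->elems[i] = v;  // real code: do_oop_store, with pre/post barriers
    }
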
975 975
976 976
977 977 void TemplateTable::bastore() {
978 978 transition(itos, vtos);
979 979 __ pop_i(rbx);
980 980 // rax,: value
981 981 // rdx: array
982 982 index_check(rdx, rbx); // prefer index in rbx,
983 983 // rbx,: index
984 984 __ movb(Address(rdx, rbx, Address::times_1, arrayOopDesc::base_offset_in_bytes(T_BYTE)), rax);
985 985 }
986 986
987 987
988 988 void TemplateTable::castore() {
989 989 transition(itos, vtos);
990 990 __ pop_i(rbx);
991 991 // rax,: value
992 992 // rdx: array
993 993 index_check(rdx, rbx); // prefer index in rbx,
994 994 // rbx,: index
995 995 __ movw(Address(rdx, rbx, Address::times_2, arrayOopDesc::base_offset_in_bytes(T_CHAR)), rax);
996 996 }
997 997
998 998
999 999 void TemplateTable::sastore() {
1000 1000 castore();
1001 1001 }
1002 1002
1003 1003
1004 1004 void TemplateTable::istore(int n) {
1005 1005 transition(itos, vtos);
1006 1006 __ movl(iaddress(n), rax);
1007 1007 }
1008 1008
1009 1009
1010 1010 void TemplateTable::lstore(int n) {
1011 1011 transition(ltos, vtos);
1012 1012 __ movptr(laddress(n), rax);
1013 1013 NOT_LP64(__ movptr(haddress(n), rdx));
1014 1014 }
1015 1015
1016 1016
1017 1017 void TemplateTable::fstore(int n) {
1018 1018 transition(ftos, vtos);
1019 1019 __ fstp_s(faddress(n));
1020 1020 }
1021 1021
1022 1022
1023 1023 void TemplateTable::dstore(int n) {
1024 1024 transition(dtos, vtos);
1025 1025 __ fstp_d(daddress(n));
1026 1026 }
1027 1027
1028 1028
1029 1029 void TemplateTable::astore(int n) {
1030 1030 transition(vtos, vtos);
1031 1031 __ pop_ptr(rax);
1032 1032 __ movptr(aaddress(n), rax);
1033 1033 }
1034 1034
1035 1035
1036 1036 void TemplateTable::pop() {
1037 1037 transition(vtos, vtos);
1038 1038 __ addptr(rsp, Interpreter::stackElementSize);
1039 1039 }
1040 1040
1041 1041
1042 1042 void TemplateTable::pop2() {
1043 1043 transition(vtos, vtos);
1044 1044 __ addptr(rsp, 2*Interpreter::stackElementSize);
1045 1045 }
1046 1046
1047 1047
1048 1048 void TemplateTable::dup() {
1049 1049 transition(vtos, vtos);
1050 1050 // stack: ..., a
1051 1051 __ load_ptr(0, rax);
1052 1052 __ push_ptr(rax);
1053 1053 // stack: ..., a, a
1054 1054 }
1055 1055
1056 1056
1057 1057 void TemplateTable::dup_x1() {
1058 1058 transition(vtos, vtos);
1059 1059 // stack: ..., a, b
1060 1060 __ load_ptr( 0, rax); // load b
1061 1061 __ load_ptr( 1, rcx); // load a
1062 1062 __ store_ptr(1, rax); // store b
1063 1063 __ store_ptr(0, rcx); // store a
1064 1064 __ push_ptr(rax); // push b
1065 1065 // stack: ..., b, a, b
1066 1066 }
1067 1067
1068 1068
1069 1069 void TemplateTable::dup_x2() {
1070 1070 transition(vtos, vtos);
1071 1071 // stack: ..., a, b, c
1072 1072 __ load_ptr( 0, rax); // load c
1073 1073 __ load_ptr( 2, rcx); // load a
1074 1074 __ store_ptr(2, rax); // store c in a
1075 1075 __ push_ptr(rax); // push c
1076 1076 // stack: ..., c, b, c, c
1077 1077 __ load_ptr( 2, rax); // load b
1078 1078 __ store_ptr(2, rcx); // store a in b
1079 1079 // stack: ..., c, a, c, c
1080 1080 __ store_ptr(1, rax); // store b in c
1081 1081 // stack: ..., c, a, b, c
1082 1082 }
1083 1083
1084 1084
1085 1085 void TemplateTable::dup2() {
1086 1086 transition(vtos, vtos);
1087 1087 // stack: ..., a, b
1088 1088 __ load_ptr(1, rax); // load a
1089 1089 __ push_ptr(rax); // push a
1090 1090 __ load_ptr(1, rax); // load b
1091 1091 __ push_ptr(rax); // push b
1092 1092 // stack: ..., a, b, a, b
1093 1093 }
1094 1094
1095 1095
1096 1096 void TemplateTable::dup2_x1() {
1097 1097 transition(vtos, vtos);
1098 1098 // stack: ..., a, b, c
1099 1099 __ load_ptr( 0, rcx); // load c
1100 1100 __ load_ptr( 1, rax); // load b
1101 1101 __ push_ptr(rax); // push b
1102 1102 __ push_ptr(rcx); // push c
1103 1103 // stack: ..., a, b, c, b, c
1104 1104 __ store_ptr(3, rcx); // store c in b
1105 1105 // stack: ..., a, c, c, b, c
1106 1106 __ load_ptr( 4, rcx); // load a
1107 1107 __ store_ptr(2, rcx); // store a in 2nd c
1108 1108 // stack: ..., a, c, a, b, c
1109 1109 __ store_ptr(4, rax); // store b in a
1110 1110 // stack: ..., b, c, a, b, c
1111 1111 // stack: ..., b, c, a, b, c
1112 1112 }
1113 1113
1114 1114
1115 1115 void TemplateTable::dup2_x2() {
1116 1116 transition(vtos, vtos);
1117 1117 // stack: ..., a, b, c, d
1118 1118 __ load_ptr( 0, rcx); // load d
1119 1119 __ load_ptr( 1, rax); // load c
1120 1120 __ push_ptr(rax); // push c
1121 1121 __ push_ptr(rcx); // push d
1122 1122 // stack: ..., a, b, c, d, c, d
1123 1123 __ load_ptr( 4, rax); // load b
1124 1124 __ store_ptr(2, rax); // store b in d
1125 1125 __ store_ptr(4, rcx); // store d in b
1126 1126 // stack: ..., a, d, c, b, c, d
1127 1127 __ load_ptr( 5, rcx); // load a
1128 1128 __ load_ptr( 3, rax); // load c
1129 1129 __ store_ptr(3, rcx); // store a in c
1130 1130 __ store_ptr(5, rax); // store c in a
1131 1131 // stack: ..., c, d, a, b, c, d
1132 1132 // stack: ..., c, d, a, b, c, d
1133 1133 }
1134 1134
1135 1135
1136 1136 void TemplateTable::swap() {
1137 1137 transition(vtos, vtos);
1138 1138 // stack: ..., a, b
1139 1139 __ load_ptr( 1, rcx); // load a
1140 1140 __ load_ptr( 0, rax); // load b
1141 1141 __ store_ptr(0, rcx); // store a in b
1142 1142 __ store_ptr(1, rax); // store b in a
1143 1143 // stack: ..., b, a
1144 1144 }
1145 1145
1146 1146
1147 1147 void TemplateTable::iop2(Operation op) {
1148 1148 transition(itos, itos);
1149 1149 switch (op) {
1150 1150 case add : __ pop_i(rdx); __ addl (rax, rdx); break;
1151 1151 case sub : __ mov(rdx, rax); __ pop_i(rax); __ subl (rax, rdx); break;
1152 1152 case mul : __ pop_i(rdx); __ imull(rax, rdx); break;
1153 1153 case _and : __ pop_i(rdx); __ andl (rax, rdx); break;
1154 1154 case _or : __ pop_i(rdx); __ orl (rax, rdx); break;
1155 1155 case _xor : __ pop_i(rdx); __ xorl (rax, rdx); break;
1156 1156 case shl : __ mov(rcx, rax); __ pop_i(rax); __ shll (rax); break; // implicit masking of lower 5 bits by Intel shift instr.
1157 1157 case shr : __ mov(rcx, rax); __ pop_i(rax); __ sarl (rax); break; // implicit masking of lower 5 bits by Intel shift instr.
1158 1158 case ushr : __ mov(rcx, rax); __ pop_i(rax); __ shrl (rax); break; // implicit masking of lower 5 bits by Intel shift instr.
1159 1159 default : ShouldNotReachHere();
1160 1160 }
1161 1161 }
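
The "implicit masking" comments rely on x86 shl/sar/shr masking a CL shift count to its low 5 bits in hardware, which happens to be exactly the JVM's definition of ishl/ishr/iushr (count & 0x1f) -- e.g. (1 << 33) == 2 in Java. A sketch:

    #include <cstdint>

    inline int32_t jvm_ishl(int32_t x, int32_t s) {
      return (int32_t)((uint32_t)x << (s & 0x1f)); // unsigned shift sidesteps C++ UB
    }
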
1162 1162
1163 1163
1164 1164 void TemplateTable::lop2(Operation op) {
1165 1165 transition(ltos, ltos);
1166 1166 __ pop_l(rbx, rcx);
1167 1167 switch (op) {
1168 1168 case add : __ addl(rax, rbx); __ adcl(rdx, rcx); break;
1169 1169 case sub : __ subl(rbx, rax); __ sbbl(rcx, rdx);
1170 1170 __ mov (rax, rbx); __ mov (rdx, rcx); break;
1171 1171 case _and : __ andl(rax, rbx); __ andl(rdx, rcx); break;
1172 1172 case _or : __ orl (rax, rbx); __ orl (rdx, rcx); break;
1173 1173 case _xor : __ xorl(rax, rbx); __ xorl(rdx, rcx); break;
1174 1174 default : ShouldNotReachHere();
1175 1175 }
1176 1176 }
1177 1177
1178 1178
1179 1179 void TemplateTable::idiv() {
1180 1180 transition(itos, itos);
1181 1181 __ mov(rcx, rax);
1182 1182 __ pop_i(rax);
1183 1183 // Note: could xor rax, and rcx and compare with (-1 ^ min_int). If
1184 1184 // they are not equal, one could do a normal division (no correction
1185 1185 // needed), which may speed up this implementation for the common case.
1186 1186 // (see also JVM spec., p.243 & p.271)
1187 1187 __ corrected_idivl(rcx);
1188 1188 }
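
corrected_idivl exists for one corner case: on x86, min_int / -1 raises a divide error (#DE), because the true quotient 2^31 is unrepresentable, while the JVM defines the result as min_int with remainder 0. The same correction serves irem below. The required results, as a C++ sketch:

    #include <cstdint>

    int32_t jvm_idiv(int32_t x, int32_t y) {             // y == 0 throws before this point
      if (y == -1) return (int32_t)(0u - (uint32_t)x);   // negate without UB; min_int maps to itself
      return x / y;
    }

    int32_t jvm_irem(int32_t x, int32_t y) {
      if (y == -1) return 0;                             // remainder of the corrected case
      return x % y;
    }
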
1189 1189
1190 1190
1191 1191 void TemplateTable::irem() {
1192 1192 transition(itos, itos);
1193 1193 __ mov(rcx, rax);
1194 1194 __ pop_i(rax);
1195 1195 // Note: could xor rax, and rcx and compare with (-1 ^ min_int). If
1196 1196 // they are not equal, one could do a normal division (no correction
1197 1197 // needed), which may speed up this implementation for the common case.
1198 1198 // (see also JVM spec., p.243 & p.271)
1199 1199 __ corrected_idivl(rcx);
1200 1200 __ mov(rax, rdx);
1201 1201 }
1202 1202
1203 1203
1204 1204 void TemplateTable::lmul() {
1205 1205 transition(ltos, ltos);
1206 1206 __ pop_l(rbx, rcx);
1207 1207 __ push(rcx); __ push(rbx);
1208 1208 __ push(rdx); __ push(rax);
1209 1209 __ lmul(2 * wordSize, 0);
1210 1210 __ addptr(rsp, 4 * wordSize); // take off temporaries
1211 1211 }
1212 1212
1213 1213
1214 1214 void TemplateTable::ldiv() {
1215 1215 transition(ltos, ltos);
1216 1216 __ pop_l(rbx, rcx);
1217 1217 __ push(rcx); __ push(rbx);
1218 1218 __ push(rdx); __ push(rax);
1219 1219 // check if y = 0
1220 1220 __ orl(rax, rdx);
1221 1221 __ jump_cc(Assembler::zero,
1222 1222 ExternalAddress(Interpreter::_throw_ArithmeticException_entry));
1223 1223 __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::ldiv));
1224 1224 __ addptr(rsp, 4 * wordSize); // take off temporaries
1225 1225 }
1226 1226
1227 1227
1228 1228 void TemplateTable::lrem() {
1229 1229 transition(ltos, ltos);
1230 1230 __ pop_l(rbx, rcx);
1231 1231 __ push(rcx); __ push(rbx);
1232 1232 __ push(rdx); __ push(rax);
1233 1233 // check if y = 0
1234 1234 __ orl(rax, rdx);
1235 1235 __ jump_cc(Assembler::zero,
1236 1236 ExternalAddress(Interpreter::_throw_ArithmeticException_entry));
1237 1237 __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::lrem));
1238 1238 __ addptr(rsp, 4 * wordSize);
1239 1239 }
1240 1240
1241 1241
1242 1242 void TemplateTable::lshl() {
1243 1243 transition(itos, ltos);
1244 1244 __ movl(rcx, rax); // get shift count
1245 1245 __ pop_l(rax, rdx); // get shift value
1246 1246 __ lshl(rdx, rax);
1247 1247 }
1248 1248
1249 1249
1250 1250 void TemplateTable::lshr() {
1251 1251 transition(itos, ltos);
1252 1252 __ mov(rcx, rax); // get shift count
1253 1253 __ pop_l(rax, rdx); // get shift value
1254 1254 __ lshr(rdx, rax, true);
1255 1255 }
1256 1256
1257 1257
1258 1258 void TemplateTable::lushr() {
1259 1259 transition(itos, ltos);
1260 1260 __ mov(rcx, rax); // get shift count
1261 1261 __ pop_l(rax, rdx); // get shift value
1262 1262 __ lshr(rdx, rax);
1263 1263 }
1264 1264
1265 1265
1266 1266 void TemplateTable::fop2(Operation op) {
1267 1267 transition(ftos, ftos);
1268 1268 switch (op) {
1269 1269 case add: __ fadd_s (at_rsp()); break;
1270 1270 case sub: __ fsubr_s(at_rsp()); break;
1271 1271 case mul: __ fmul_s (at_rsp()); break;
1272 1272 case div: __ fdivr_s(at_rsp()); break;
1273 1273 case rem: __ fld_s (at_rsp()); __ fremr(rax); break;
1274 1274 default : ShouldNotReachHere();
1275 1275 }
1276 1276 __ f2ieee();
1277 1277 __ pop(rax); // pop float thing off
1278 1278 }
1279 1279
1280 1280
1281 1281 void TemplateTable::dop2(Operation op) {
1282 1282 transition(dtos, dtos);
1283 1283
1284 1284 switch (op) {
1285 1285 case add: __ fadd_d (at_rsp()); break;
1286 1286 case sub: __ fsubr_d(at_rsp()); break;
1287 1287 case mul: {
1288 1288 Label L_strict;
1289 1289 Label L_join;
1290 1290 const Address access_flags (rcx, methodOopDesc::access_flags_offset());
1291 1291 __ get_method(rcx);
1292 1292 __ movl(rcx, access_flags);
1293 1293 __ testl(rcx, JVM_ACC_STRICT);
1294 1294 __ jccb(Assembler::notZero, L_strict);
1295 1295 __ fmul_d (at_rsp());
1296 1296 __ jmpb(L_join);
1297 1297 __ bind(L_strict);
1298 1298 __ fld_x(ExternalAddress(StubRoutines::addr_fpu_subnormal_bias1()));
1299 1299 __ fmulp();
1300 1300 __ fmul_d (at_rsp());
1301 1301 __ fld_x(ExternalAddress(StubRoutines::addr_fpu_subnormal_bias2()));
1302 1302 __ fmulp();
1303 1303 __ bind(L_join);
1304 1304 break;
1305 1305 }
1306 1306 case div: {
1307 1307 Label L_strict;
1308 1308 Label L_join;
1309 1309 const Address access_flags (rcx, methodOopDesc::access_flags_offset());
1310 1310 __ get_method(rcx);
1311 1311 __ movl(rcx, access_flags);
1312 1312 __ testl(rcx, JVM_ACC_STRICT);
1313 1313 __ jccb(Assembler::notZero, L_strict);
1314 1314 __ fdivr_d(at_rsp());
1315 1315 __ jmp(L_join);
1316 1316 __ bind(L_strict);
1317 1317 __ fld_x(ExternalAddress(StubRoutines::addr_fpu_subnormal_bias1()));
1318 1318 __ fmul_d (at_rsp());
1319 1319 __ fdivrp();
1320 1320 __ fld_x(ExternalAddress(StubRoutines::addr_fpu_subnormal_bias2()));
1321 1321 __ fmulp();
1322 1322 __ bind(L_join);
1323 1323 break;
1324 1324 }
1325 1325 case rem: __ fld_d (at_rsp()); __ fremr(rax); break;
1326 1326 default : ShouldNotReachHere();
1327 1327 }
1328 1328 __ d2ieee();
1329 1329 // Pop double precision number from rsp.
1330 1330 __ pop(rax);
1331 1331 __ pop(rdx);
1332 1332 }
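
The bias1/bias2 scaling in the strict paths is the usual x87 strictfp workaround. As a sketch of the idea (treat the exponent arithmetic here as an assumption): x87 computes with a 15-bit exponent, so a bare extended-precision multiply or divide can miss the underflow that IEEE double requires; pre-scaling one operand by 2^-(16383-1023) = 2^-15360 and post-scaling by 2^15360 forces underflow to happen at double's threshold, so the single final round to double matches strictfp exactly.

    #include <cmath>

    // the two scale factors as long doubles (the real constants are 80-bit
    // extended values in StubRoutines)
    const long double bias1 = ldexpl(1.0L, -15360); // applied before the strict-mode op
    const long double bias2 = ldexpl(1.0L,  15360); // applied after, restoring magnitude
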
1333 1333
1334 1334
1335 1335 void TemplateTable::ineg() {
1336 1336 transition(itos, itos);
1337 1337 __ negl(rax);
1338 1338 }
1339 1339
1340 1340
1341 1341 void TemplateTable::lneg() {
1342 1342 transition(ltos, ltos);
1343 1343 __ lneg(rdx, rax);
1344 1344 }
1345 1345
1346 1346
1347 1347 void TemplateTable::fneg() {
1348 1348 transition(ftos, ftos);
1349 1349 __ fchs();
1350 1350 }
1351 1351
1352 1352
1353 1353 void TemplateTable::dneg() {
1354 1354 transition(dtos, dtos);
1355 1355 __ fchs();
1356 1356 }
1357 1357
1358 1358
1359 1359 void TemplateTable::iinc() {
1360 1360 transition(vtos, vtos);
1361 1361 __ load_signed_byte(rdx, at_bcp(2)); // get constant
1362 1362 locals_index(rbx);
1363 1363 __ addl(iaddress(rbx), rdx);
1364 1364 }
1365 1365
1366 1366
1367 1367 void TemplateTable::wide_iinc() {
1368 1368 transition(vtos, vtos);
1369 1369 __ movl(rdx, at_bcp(4)); // get constant
1370 1370 locals_index_wide(rbx);
1371 1371 __ bswapl(rdx); // swap bytes & sign-extend constant
1372 1372 __ sarl(rdx, 16);
1373 1373 __ addl(iaddress(rbx), rdx);
1374 1374 // Note: should probably use only one movl to get both
1375 1375 // the index and the constant -> fix this
1376 1376 }
1377 1377
1378 1378
1379 1379 void TemplateTable::convert() {
1380 1380 // Checking
1381 1381 #ifdef ASSERT
1382 1382 { TosState tos_in = ilgl;
1383 1383 TosState tos_out = ilgl;
1384 1384 switch (bytecode()) {
1385 1385 case Bytecodes::_i2l: // fall through
1386 1386 case Bytecodes::_i2f: // fall through
1387 1387 case Bytecodes::_i2d: // fall through
1388 1388 case Bytecodes::_i2b: // fall through
1389 1389 case Bytecodes::_i2c: // fall through
1390 1390 case Bytecodes::_i2s: tos_in = itos; break;
1391 1391 case Bytecodes::_l2i: // fall through
1392 1392 case Bytecodes::_l2f: // fall through
1393 1393 case Bytecodes::_l2d: tos_in = ltos; break;
1394 1394 case Bytecodes::_f2i: // fall through
1395 1395 case Bytecodes::_f2l: // fall through
1396 1396 case Bytecodes::_f2d: tos_in = ftos; break;
1397 1397 case Bytecodes::_d2i: // fall through
1398 1398 case Bytecodes::_d2l: // fall through
1399 1399 case Bytecodes::_d2f: tos_in = dtos; break;
1400 1400 default : ShouldNotReachHere();
1401 1401 }
1402 1402 switch (bytecode()) {
1403 1403 case Bytecodes::_l2i: // fall through
1404 1404 case Bytecodes::_f2i: // fall through
1405 1405 case Bytecodes::_d2i: // fall through
1406 1406 case Bytecodes::_i2b: // fall through
1407 1407 case Bytecodes::_i2c: // fall through
1408 1408 case Bytecodes::_i2s: tos_out = itos; break;
1409 1409 case Bytecodes::_i2l: // fall through
1410 1410 case Bytecodes::_f2l: // fall through
1411 1411 case Bytecodes::_d2l: tos_out = ltos; break;
1412 1412 case Bytecodes::_i2f: // fall through
1413 1413 case Bytecodes::_l2f: // fall through
1414 1414 case Bytecodes::_d2f: tos_out = ftos; break;
1415 1415 case Bytecodes::_i2d: // fall through
1416 1416 case Bytecodes::_l2d: // fall through
1417 1417 case Bytecodes::_f2d: tos_out = dtos; break;
1418 1418 default : ShouldNotReachHere();
1419 1419 }
1420 1420 transition(tos_in, tos_out);
1421 1421 }
1422 1422 #endif // ASSERT
1423 1423
1424 1424 // Conversion
1425 1425 // (Note: use push(rcx)/pop(rcx) for 1/2-word stack-ptr manipulation)
1426 1426 switch (bytecode()) {
1427 1427 case Bytecodes::_i2l:
1428 1428 __ extend_sign(rdx, rax);
1429 1429 break;
1430 1430 case Bytecodes::_i2f:
1431 1431 __ push(rax); // store int on tos
1432 1432 __ fild_s(at_rsp()); // load int to ST0
1433 1433 __ f2ieee(); // truncate to float size
1434 1434 __ pop(rcx); // adjust rsp
1435 1435 break;
1436 1436 case Bytecodes::_i2d:
1437 1437 __ push(rax); // add one slot for d2ieee()
1438 1438 __ push(rax); // store int on tos
1439 1439 __ fild_s(at_rsp()); // load int to ST0
1440 1440 __ d2ieee(); // truncate to double size
1441 1441 __ pop(rcx); // adjust rsp
1442 1442 __ pop(rcx);
1443 1443 break;
1444 1444 case Bytecodes::_i2b:
1445 1445 __ shll(rax, 24); // truncate upper 24 bits
1446 1446 __ sarl(rax, 24); // and sign-extend byte
1447 1447 LP64_ONLY(__ movsbl(rax, rax));
1448 1448 break;
1449 1449 case Bytecodes::_i2c:
1450 1450 __ andl(rax, 0xFFFF); // truncate upper 16 bits
1451 1451 LP64_ONLY(__ movzwl(rax, rax));
1452 1452 break;
1453 1453 case Bytecodes::_i2s:
1454 1454 __ shll(rax, 16); // truncate upper 16 bits
1455 1455 __ sarl(rax, 16); // and sign-extend short
1456 1456 LP64_ONLY(__ movswl(rax, rax));
1457 1457 break;
1458 1458 case Bytecodes::_l2i:
1459 1459 /* nothing to do */
1460 1460 break;
1461 1461 case Bytecodes::_l2f:
1462 1462 __ push(rdx); // store long on tos
1463 1463 __ push(rax);
1464 1464 __ fild_d(at_rsp()); // load long to ST0
1465 1465 __ f2ieee(); // truncate to float size
1466 1466 __ pop(rcx); // adjust rsp
1467 1467 __ pop(rcx);
1468 1468 break;
1469 1469 case Bytecodes::_l2d:
1470 1470 __ push(rdx); // store long on tos
1471 1471 __ push(rax);
1472 1472 __ fild_d(at_rsp()); // load long to ST0
1473 1473 __ d2ieee(); // truncate to double size
1474 1474 __ pop(rcx); // adjust rsp
1475 1475 __ pop(rcx);
1476 1476 break;
1477 1477 case Bytecodes::_f2i:
1478 1478 __ push(rcx); // reserve space for argument
1479 1479 __ fstp_s(at_rsp()); // pass float argument on stack
1480 1480 __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::f2i), 1);
1481 1481 break;
1482 1482 case Bytecodes::_f2l:
1483 1483 __ push(rcx); // reserve space for argument
1484 1484 __ fstp_s(at_rsp()); // pass float argument on stack
1485 1485 __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::f2l), 1);
1486 1486 break;
1487 1487 case Bytecodes::_f2d:
1488 1488 /* nothing to do */
1489 1489 break;
1490 1490 case Bytecodes::_d2i:
1491 1491 __ push(rcx); // reserve space for argument
1492 1492 __ push(rcx);
1493 1493 __ fstp_d(at_rsp()); // pass double argument on stack
1494 1494 __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::d2i), 2);
1495 1495 break;
1496 1496 case Bytecodes::_d2l:
1497 1497 __ push(rcx); // reserve space for argument
1498 1498 __ push(rcx);
1499 1499 __ fstp_d(at_rsp()); // pass double argument on stack
1500 1500 __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::d2l), 2);
1501 1501 break;
1502 1502 case Bytecodes::_d2f:
1503 1503 __ push(rcx); // reserve space for f2ieee()
1504 1504 __ f2ieee(); // truncate to float size
1505 1505 __ pop(rcx); // adjust rsp
1506 1506 break;
1507 1507 default :
1508 1508 ShouldNotReachHere();
1509 1509 }
1510 1510 }
1511 1511
1512 1512
1513 1513 void TemplateTable::lcmp() {
1514 1514 transition(ltos, itos);
1515 1515 // y = rdx:rax
1516 1516 __ pop_l(rbx, rcx); // get x = rcx:rbx
1517 1517 __ lcmp2int(rcx, rbx, rdx, rax);// rcx := cmp(x, y)
1518 1518 __ mov(rax, rcx);
1519 1519 }
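
lcmp2int leaves the standard three-way result in rcx; in C++ terms (a sketch):

    #include <cstdint>

    // lcmp pushes -1, 0, or 1 for x < y, x == y, x > y
    inline int32_t jvm_lcmp(int64_t x, int64_t y) {
      return (x > y) - (x < y);
    }
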
1520 1520
1521 1521
1522 1522 void TemplateTable::float_cmp(bool is_float, int unordered_result) {
1523 1523 if (is_float) {
1524 1524 __ fld_s(at_rsp());
1525 1525 } else {
1526 1526 __ fld_d(at_rsp());
1527 1527 __ pop(rdx);
1528 1528 }
1529 1529 __ pop(rcx);
1530 1530 __ fcmp2int(rax, unordered_result < 0);
1531 1531 }
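
unordered_result encodes the only difference between fcmpl/dcmpl (which push -1 when either operand is NaN) and fcmpg/dcmpg (which push +1); on ordered operands the two are identical. A sketch:

    // unordered_result is -1 for fcmpl/dcmpl and +1 for fcmpg/dcmpg
    inline int fcmp_model(double x, double y, int unordered_result) {
      if (x != x || y != y) return unordered_result;  // NaN: unordered
      return (x > y) - (x < y);
    }
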
1532 1532
1533 1533
1534 1534 void TemplateTable::branch(bool is_jsr, bool is_wide) {
1535 1535 __ get_method(rcx); // ECX holds method
1536 1536 __ profile_taken_branch(rax,rbx); // EAX holds updated MDP, EBX holds bumped taken count
1537 1537
1538 1538 const ByteSize be_offset = methodOopDesc::backedge_counter_offset() + InvocationCounter::counter_offset();
1539 1539 const ByteSize inv_offset = methodOopDesc::invocation_counter_offset() + InvocationCounter::counter_offset();
1540 1540 const int method_offset = frame::interpreter_frame_method_offset * wordSize;
1541 1541
1542 1542 // Load up EDX with the branch displacement
1543 1543 __ movl(rdx, at_bcp(1));
1544 1544 __ bswapl(rdx);
1545 1545 if (!is_wide) __ sarl(rdx, 16);
1546 1546 LP64_ONLY(__ movslq(rdx, rdx));
1547 1547
1548 1548
1549 1549 // Handle all the JSR stuff here, then exit.
1550 1550 // It's much shorter and cleaner than intermingling with the
1551 1551 // non-JSR normal-branch stuff occurring below.
1552 1552 if (is_jsr) {
1553 1553 // Pre-load the next target bytecode into EBX
1554 1554 __ load_unsigned_byte(rbx, Address(rsi, rdx, Address::times_1, 0));
1555 1555
1556 1556 // compute return address as bci in rax,
1557 1557 __ lea(rax, at_bcp((is_wide ? 5 : 3) - in_bytes(constMethodOopDesc::codes_offset())));
1558 1558 __ subptr(rax, Address(rcx, methodOopDesc::const_offset()));
1559 1559 // Adjust the bcp in RSI by the displacement in EDX
1560 1560 __ addptr(rsi, rdx);
1561 1561 // Push return address
1562 1562 __ push_i(rax);
1563 1563 // jsr returns vtos
1564 1564 __ dispatch_only_noverify(vtos);
1565 1565 return;
1566 1566 }
1567 1567
1568 1568 // Normal (non-jsr) branch handling
1569 1569
1570 1570 // Adjust the bcp in RSI by the displacement in EDX
1571 1571 __ addptr(rsi, rdx);
1572 1572
1573 1573 assert(UseLoopCounter || !UseOnStackReplacement, "on-stack-replacement requires loop counters");
1574 1574 Label backedge_counter_overflow;
1575 1575 Label profile_method;
1576 1576 Label dispatch;
1577 1577 if (UseLoopCounter) {
1578 1578 // increment backedge counter for backward branches
1579 1579 // rax,: MDO
1580 1580 // rbx,: MDO bumped taken-count
1581 1581 // rcx: method
1582 1582 // rdx: target offset
1583 1583 // rsi: target bcp
1584 1584 // rdi: locals pointer
1585 1585 __ testl(rdx, rdx); // check if forward or backward branch
1586 1586 __ jcc(Assembler::positive, dispatch); // count only if backward branch
1587 1587
1588 1588 if (TieredCompilation) {
1589 1589 Label no_mdo;
1590 1590 int increment = InvocationCounter::count_increment;
1591 1591 int mask = ((1 << Tier0BackedgeNotifyFreqLog) - 1) << InvocationCounter::count_shift;
1592 1592 if (ProfileInterpreter) {
1593 1593 // Are we profiling?
1594 1594 __ movptr(rbx, Address(rcx, in_bytes(methodOopDesc::method_data_offset())));
1595 1595 __ testptr(rbx, rbx);
1596 1596 __ jccb(Assembler::zero, no_mdo);
1597 1597 // Increment the MDO backedge counter
1598 1598 const Address mdo_backedge_counter(rbx, in_bytes(methodDataOopDesc::backedge_counter_offset()) +
1599 1599 in_bytes(InvocationCounter::counter_offset()));
1600 1600 __ increment_mask_and_jump(mdo_backedge_counter, increment, mask,
1601 1601 rax, false, Assembler::zero, &backedge_counter_overflow);
1602 1602 __ jmp(dispatch);
1603 1603 }
1604 1604 __ bind(no_mdo);
1605 1605 // Increment backedge counter in methodOop
1606 1606 __ increment_mask_and_jump(Address(rcx, be_offset), increment, mask,
1607 1607 rax, false, Assembler::zero, &backedge_counter_overflow);
1608 1608 } else {
1609 1609 // increment counter
1610 1610 __ movl(rax, Address(rcx, be_offset)); // load backedge counter
1611 1611 __ incrementl(rax, InvocationCounter::count_increment); // increment counter
1612 1612 __ movl(Address(rcx, be_offset), rax); // store counter
1613 1613
1614 1614 __ movl(rax, Address(rcx, inv_offset)); // load invocation counter
1615 1615 __ andl(rax, InvocationCounter::count_mask_value); // and the status bits
1616 1616 __ addl(rax, Address(rcx, be_offset)); // add both counters
1617 1617
1618 1618 if (ProfileInterpreter) {
1619 1619 // Test to see if we should create a method data oop
1620 1620 __ cmp32(rax,
1621 1621 ExternalAddress((address) &InvocationCounter::InterpreterProfileLimit));
1622 1622 __ jcc(Assembler::less, dispatch);
1623 1623
1624 1624 // if no method data exists, go to profile method
1625 1625 __ test_method_data_pointer(rax, profile_method);
1626 1626
1627 1627 if (UseOnStackReplacement) {
1628 1628 // check for overflow against rbx, which is the MDO taken count
1629 1629 __ cmp32(rbx,
1630 1630 ExternalAddress((address) &InvocationCounter::InterpreterBackwardBranchLimit));
1631 1631 __ jcc(Assembler::below, dispatch);
1632 1632
1633 1633 // When ProfileInterpreter is on, the backedge_count comes from the
1634 1634 // methodDataOop, whose value does not get reset on the call to
1635 1635 // frequency_counter_overflow(). To avoid excessive calls to the overflow
1636 1636 // routine while the method is being compiled, add a second test to make
1637 1637 // sure the overflow function is called only once every overflow_frequency.
1638 1638 const int overflow_frequency = 1024;
1639 1639 __ andptr(rbx, overflow_frequency-1);
1640 1640 __ jcc(Assembler::zero, backedge_counter_overflow);
1641 1641 }
1642 1642 } else {
1643 1643 if (UseOnStackReplacement) {
1644 1644 // check for overflow against rax, which is the sum of the counters
1645 1645 __ cmp32(rax,
1646 1646 ExternalAddress((address) &InvocationCounter::InterpreterBackwardBranchLimit));
1647 1647 __ jcc(Assembler::aboveEqual, backedge_counter_overflow);
1648 1648
1649 1649 }
1650 1650 }
1651 1651 }
1652 1652 __ bind(dispatch);
1653 1653 }
1654 1654
1655 1655 // Pre-load the next target bytecode into EBX
1656 1656 __ load_unsigned_byte(rbx, Address(rsi, 0));
1657 1657
1658 1658 // continue with the bytecode @ target
1659 1659 // rax,: return bci for jsr's, unused otherwise
1660 1660 // rbx,: target bytecode
1661 1661 // rsi: target bcp
1662 1662 __ dispatch_only(vtos);
1663 1663
1664 1664 if (UseLoopCounter) {
1665 1665 if (ProfileInterpreter) {
1666 1666 // Out-of-line code to allocate method data oop.
1667 1667 __ bind(profile_method);
1668 1668 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::profile_method));
1669 1669 __ load_unsigned_byte(rbx, Address(rsi, 0)); // restore target bytecode
1670 1670 __ set_method_data_pointer_for_bcp();
1671 1671 __ jmp(dispatch);
1672 1672 }
1673 1673
1674 1674 if (UseOnStackReplacement) {
1675 1675
1676 1676 // backedge counter overflow
1677 1677 __ bind(backedge_counter_overflow);
1678 1678 __ negptr(rdx);
1679 1679 __ addptr(rdx, rsi); // branch bcp
1680 1680 call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::frequency_counter_overflow), rdx);
1681 1681 __ load_unsigned_byte(rbx, Address(rsi, 0)); // restore target bytecode
1682 1682
1683 1683 // rax,: osr nmethod (osr ok) or NULL (osr not possible)
1684 1684 // rbx,: target bytecode
1685 1685 // rdx: scratch
1686 1686 // rdi: locals pointer
1687 1687 // rsi: bcp
1688 1688 __ testptr(rax, rax); // test result
1689 1689 __ jcc(Assembler::zero, dispatch); // no osr if null
1690 1690 // nmethod may have been invalidated (VM may block upon call_VM return)
1691 1691 __ movl(rcx, Address(rax, nmethod::entry_bci_offset()));
1692 1692 __ cmpl(rcx, InvalidOSREntryBci);
1693 1693 __ jcc(Assembler::equal, dispatch);
1694 1694
1695 1695 // We have the address of an on stack replacement routine in rax,
1696 1696 // We need to prepare to execute the OSR method. First we must
1697 1697 // migrate the locals and monitors off of the stack.
1698 1698
1699 1699 __ mov(rbx, rax); // save the nmethod
1700 1700
1701 1701 const Register thread = rcx;
1702 1702 __ get_thread(thread);
1703 1703 call_VM(noreg, CAST_FROM_FN_PTR(address, SharedRuntime::OSR_migration_begin));
1704 1704 // rax, is OSR buffer, move it to expected parameter location
1705 1705 __ mov(rcx, rax);
1706 1706
1707 1707 // pop the interpreter frame
1708 1708 __ movptr(rdx, Address(rbp, frame::interpreter_frame_sender_sp_offset * wordSize)); // get sender sp
1709 1709 __ leave(); // remove frame anchor
1710 1710 __ pop(rdi); // get return address
1711 1711 __ mov(rsp, rdx); // set sp to sender sp
1712 1712
1713 1713 // Align stack pointer for compiled code (note that caller is
1714 1714 // responsible for undoing this fixup by remembering the old SP
1715 1715 // in an rbp,-relative location)
1716 1716 __ andptr(rsp, -(StackAlignmentInBytes));
1717 1717
1718 1718 // push the (possibly adjusted) return address
1719 1719 __ push(rdi);
1720 1720
1721 1721 // and begin the OSR nmethod
1722 1722 __ jmp(Address(rbx, nmethod::osr_entry_point_offset()));
1723 1723 }
1724 1724 }
1725 1725 }
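
// Editorial sketch (not VM code): the counter policy implemented above,
// restated as C-like pseudocode. The helper names create_method_data()
// and compile_osr() are illustrative, not real entry points.
//
//   void on_branch(int offset) {
//     if (offset < 0) {                            // backward branch
//       int count = ++backedge_counter;
//       if (ProfileInterpreter && mdo == NULL &&
//           invocation_count + count >= InterpreterProfileLimit) {
//         create_method_data();                    // 'profile_method' path
//       }
//       if (UseOnStackReplacement &&
//           count >= InterpreterBackwardBranchLimit) {
//         compile_osr(branch_bcp);                 // 'backedge_counter_overflow'
//       }
//     }
//     dispatch_next();
//   }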
1726 1726
1727 1727
1728 1728 void TemplateTable::if_0cmp(Condition cc) {
1729 1729 transition(itos, vtos);
1730 1730 // assume branch is more often taken than not (loops use backward branches)
1731 1731 Label not_taken;
1732 1732 __ testl(rax, rax);
1733 1733 __ jcc(j_not(cc), not_taken);
1734 1734 branch(false, false);
1735 1735 __ bind(not_taken);
1736 1736 __ profile_not_taken_branch(rax);
1737 1737 }
1738 1738
1739 1739
1740 1740 void TemplateTable::if_icmp(Condition cc) {
1741 1741 transition(itos, vtos);
1742 1742 // assume branch is more often taken than not (loops use backward branches)
1743 1743 Label not_taken;
1744 1744 __ pop_i(rdx);
1745 1745 __ cmpl(rdx, rax);
1746 1746 __ jcc(j_not(cc), not_taken);
1747 1747 branch(false, false);
1748 1748 __ bind(not_taken);
1749 1749 __ profile_not_taken_branch(rax);
1750 1750 }
1751 1751
1752 1752
1753 1753 void TemplateTable::if_nullcmp(Condition cc) {
1754 1754 transition(atos, vtos);
1755 1755 // assume branch is more often taken than not (loops use backward branches)
1756 1756 Label not_taken;
1757 1757 __ testptr(rax, rax);
1758 1758 __ jcc(j_not(cc), not_taken);
1759 1759 branch(false, false);
1760 1760 __ bind(not_taken);
1761 1761 __ profile_not_taken_branch(rax);
1762 1762 }
1763 1763
1764 1764
1765 1765 void TemplateTable::if_acmp(Condition cc) {
1766 1766 transition(atos, vtos);
1767 1767 // assume branch is more often taken than not (loops use backward branches)
1768 1768 Label not_taken;
1769 1769 __ pop_ptr(rdx);
1770 1770 __ cmpptr(rdx, rax);
1771 1771 __ jcc(j_not(cc), not_taken);
1772 1772 branch(false, false);
1773 1773 __ bind(not_taken);
1774 1774 __ profile_not_taken_branch(rax);
1775 1775 }
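
// Editorial sketch: the four conditional-branch templates above share one
// shape; only the compare differs (testl/cmpl/testptr/cmpptr).
//
//   if (compare(tos_operands, cc)) {   // branch assumed taken (loops)
//     branch(false, false);            // adjusts rsi, bumps backedge counters
//   } else {
//     profile_not_taken_branch();      // fall through to the next bytecode
//   }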
1776 1776
1777 1777
1778 1778 void TemplateTable::ret() {
1779 1779 transition(vtos, vtos);
1780 1780 locals_index(rbx);
1781 1781 __ movptr(rbx, iaddress(rbx)); // get return bci, compute return bcp
1782 1782 __ profile_ret(rbx, rcx);
1783 1783 __ get_method(rax);
1784 1784 __ movptr(rsi, Address(rax, methodOopDesc::const_offset()));
1785 1785 __ lea(rsi, Address(rsi, rbx, Address::times_1,
1786 1786 constMethodOopDesc::codes_offset()));
1787 1787 __ dispatch_next(vtos);
1788 1788 }
1789 1789
1790 1790
1791 1791 void TemplateTable::wide_ret() {
1792 1792 transition(vtos, vtos);
1793 1793 locals_index_wide(rbx);
1794 1794 __ movptr(rbx, iaddress(rbx)); // get return bci, compute return bcp
1795 1795 __ profile_ret(rbx, rcx);
1796 1796 __ get_method(rax);
1797 1797 __ movptr(rsi, Address(rax, methodOopDesc::const_offset()));
1798 1798 __ lea(rsi, Address(rsi, rbx, Address::times_1, constMethodOopDesc::codes_offset()));
1799 1799 __ dispatch_next(vtos);
1800 1800 }
1801 1801
1802 1802
1803 1803 void TemplateTable::tableswitch() {
1804 1804 Label default_case, continue_execution;
1805 1805 transition(itos, vtos);
1806 1806 // compute word-aligned address of the jump table (in rbx)
1807 1807 __ lea(rbx, at_bcp(wordSize));
1808 1808 __ andptr(rbx, -wordSize);
1809 1809 // load lo & hi
1810 1810 __ movl(rcx, Address(rbx, 1 * wordSize));
1811 1811 __ movl(rdx, Address(rbx, 2 * wordSize));
1812 1812 __ bswapl(rcx);
1813 1813 __ bswapl(rdx);
1814 1814 // check against lo & hi
1815 1815 __ cmpl(rax, rcx);
1816 1816 __ jccb(Assembler::less, default_case);
1817 1817 __ cmpl(rax, rdx);
1818 1818 __ jccb(Assembler::greater, default_case);
1819 1819 // lookup dispatch offset
1820 1820 __ subl(rax, rcx);
1821 1821 __ movl(rdx, Address(rbx, rax, Address::times_4, 3 * BytesPerInt));
1822 1822 __ profile_switch_case(rax, rbx, rcx);
1823 1823 // continue execution
1824 1824 __ bind(continue_execution);
1825 1825 __ bswapl(rdx);
1826 1826 __ load_unsigned_byte(rbx, Address(rsi, rdx, Address::times_1));
1827 1827 __ addptr(rsi, rdx);
1828 1828 __ dispatch_only(vtos);
1829 1829 // handle default
1830 1830 __ bind(default_case);
1831 1831 __ profile_switch_default(rax);
1832 1832 __ movl(rdx, Address(rbx, 0));
1833 1833 __ jmp(continue_execution);
1834 1834 }
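
// Editorial sketch: the tableswitch operand layout decoded above,
// starting at the first word-aligned address after the opcode (the
// aligned address computed into rbx).
//
//   struct tableswitch_operands {      // all fields big-endian;
//     int32_t default_offset;          // +0   swapped to native before use
//     int32_t lo, hi;                  // +4, +8
//     int32_t offsets[];               // hi - lo + 1 entries, from +12
//   };
//
//   int32_t off = (key < bswap(lo) || key > bswap(hi))
//               ? default_offset : offsets[key - bswap(lo)];
//   bcp += bswap(off);                 // then dispatch at the new bcp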
1835 1835
1836 1836
1837 1837 void TemplateTable::lookupswitch() {
1838 1838 transition(itos, itos);
1839 1839 __ stop("lookupswitch bytecode should have been rewritten");
1840 1840 }
1841 1841
1842 1842
1843 1843 void TemplateTable::fast_linearswitch() {
1844 1844 transition(itos, vtos);
1845 1845 Label loop_entry, loop, found, continue_execution;
1846 1846 // bswapl rax, so we can avoid bswapping the table entries
1847 1847 __ bswapl(rax);
1848 1848 // compute word-aligned address of the pair table (in rbx)
1849 1849 __ lea(rbx, at_bcp(wordSize)); // btw: should be able to get rid of this instruction (change offsets below)
1850 1850 __ andptr(rbx, -wordSize);
1851 1851 // set counter
1852 1852 __ movl(rcx, Address(rbx, wordSize));
1853 1853 __ bswapl(rcx);
1854 1854 __ jmpb(loop_entry);
1855 1855 // table search
1856 1856 __ bind(loop);
1857 1857 __ cmpl(rax, Address(rbx, rcx, Address::times_8, 2 * wordSize));
1858 1858 __ jccb(Assembler::equal, found);
1859 1859 __ bind(loop_entry);
1860 1860 __ decrementl(rcx);
1861 1861 __ jcc(Assembler::greaterEqual, loop);
1862 1862 // default case
1863 1863 __ profile_switch_default(rax);
1864 1864 __ movl(rdx, Address(rbx, 0));
1865 1865 __ jmpb(continue_execution);
1866 1866 // entry found -> get offset
1867 1867 __ bind(found);
1868 1868 __ movl(rdx, Address(rbx, rcx, Address::times_8, 3 * wordSize));
1869 1869 __ profile_switch_case(rcx, rax, rbx);
1870 1870 // continue execution
1871 1871 __ bind(continue_execution);
1872 1872 __ bswapl(rdx);
1873 1873 __ load_unsigned_byte(rbx, Address(rsi, rdx, Address::times_1));
1874 1874 __ addptr(rsi, rdx);
1875 1875 __ dispatch_only(vtos);
1876 1876 }
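
// Editorial sketch: fast_linearswitch scans the match/offset pairs
// backwards; the key in rax is byte-swapped once up front so the
// big-endian table entries need no swapping inside the loop.
//
//   struct pair { int32_t match; int32_t offset; };   // big-endian
//   int32_t off = default_offset;
//   for (int k = npairs - 1; k >= 0; k--) {
//     if (pairs[k].match == key) { off = pairs[k].offset; break; }
//   }
//   bcp += bswap(off);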
1877 1877
1878 1878
1879 1879 void TemplateTable::fast_binaryswitch() {
1880 1880 transition(itos, vtos);
1881 1881 // Implementation using the following core algorithm:
1882 1882 //
1883 1883 // int binary_search(int key, LookupswitchPair* array, int n) {
1884 1884 // // Binary search according to "Methodik des Programmierens" by
1885 1885 // // Edsger W. Dijkstra and W.H.J. Feijen, Addison Wesley Germany 1985.
1886 1886 // int i = 0;
1887 1887 // int j = n;
1888 1888 // while (i+1 < j) {
1889 1889 // // invariant P: 0 <= i < j <= n and (a[i] <= key < a[j] or Q)
1890 1890 // // with Q: for all i: 0 <= i < n: key < a[i]
1891 1891 // // where a stands for the array and assuming that the (nonexistent)
1892 1892 // // element a[n] is infinitely big.
1893 1893 // int h = (i + j) >> 1;
1894 1894 // // i < h < j
1895 1895 // if (key < array[h].fast_match()) {
1896 1896 // j = h;
1897 1897 // } else {
1898 1898 // i = h;
1899 1899 // }
1900 1900 // }
1901 1901 // // R: a[i] <= key < a[i+1] or Q
1902 1902 // // (i.e., if key is within array, i is the correct index)
1903 1903 // return i;
1904 1904 // }
1905 1905
1906 1906 // register allocation
1907 1907 const Register key = rax; // already set (tosca)
1908 1908 const Register array = rbx;
1909 1909 const Register i = rcx;
1910 1910 const Register j = rdx;
1911 1911 const Register h = rdi; // needs to be restored
1912 1912 const Register temp = rsi;
1913 1913 // setup array
1914 1914 __ save_bcp();
1915 1915
1916 1916 __ lea(array, at_bcp(3*wordSize)); // btw: should be able to get rid of this instruction (change offsets below)
1917 1917 __ andptr(array, -wordSize);
1918 1918 // initialize i & j
1919 1919 __ xorl(i, i); // i = 0;
1920 1920 __ movl(j, Address(array, -wordSize)); // j = length(array);
1921 1921 // Convert j into native byte ordering
1922 1922 __ bswapl(j);
1923 1923 // and start
1924 1924 Label entry;
1925 1925 __ jmp(entry);
1926 1926
1927 1927 // binary search loop
1928 1928 { Label loop;
1929 1929 __ bind(loop);
1930 1930 // int h = (i + j) >> 1;
1931 1931 __ leal(h, Address(i, j, Address::times_1)); // h = i + j;
1932 1932 __ sarl(h, 1); // h = (i + j) >> 1;
1933 1933 // if (key < array[h].fast_match()) {
1934 1934 // j = h;
1935 1935 // } else {
1936 1936 // i = h;
1937 1937 // }
1938 1938 // Convert array[h].match to native byte-ordering before compare
1939 1939 __ movl(temp, Address(array, h, Address::times_8, 0*wordSize));
1940 1940 __ bswapl(temp);
1941 1941 __ cmpl(key, temp);
1942 1942 if (VM_Version::supports_cmov()) {
1943 1943 __ cmovl(Assembler::less , j, h); // j = h if (key < array[h].fast_match())
1944 1944 __ cmovl(Assembler::greaterEqual, i, h); // i = h if (key >= array[h].fast_match())
1945 1945 } else {
1946 1946 Label set_i, end_of_if;
1947 1947 __ jccb(Assembler::greaterEqual, set_i); // {
1948 1948 __ mov(j, h); // j = h;
1949 1949 __ jmp(end_of_if); // }
1950 1950 __ bind(set_i); // else {
1951 1951 __ mov(i, h); // i = h;
1952 1952 __ bind(end_of_if); // }
1953 1953 }
1954 1954 // while (i+1 < j)
1955 1955 __ bind(entry);
1956 1956 __ leal(h, Address(i, 1)); // i+1
1957 1957 __ cmpl(h, j); // i+1 < j
1958 1958 __ jcc(Assembler::less, loop);
1959 1959 }
1960 1960
1961 1961 // end of binary search, result index is i (must check again!)
1962 1962 Label default_case;
1963 1963 // Convert array[i].match to native byte-ordering before compare
1964 1964 __ movl(temp, Address(array, i, Address::times_8, 0*wordSize));
1965 1965 __ bswapl(temp);
1966 1966 __ cmpl(key, temp);
1967 1967 __ jcc(Assembler::notEqual, default_case);
1968 1968
1969 1969 // entry found -> j = offset
1970 1970 __ movl(j , Address(array, i, Address::times_8, 1*wordSize));
1971 1971 __ profile_switch_case(i, key, array);
1972 1972 __ bswapl(j);
1973 1973 LP64_ONLY(__ movslq(j, j));
1974 1974 __ restore_bcp();
1975 1975 __ restore_locals(); // restore rdi
1976 1976 __ load_unsigned_byte(rbx, Address(rsi, j, Address::times_1));
1977 1977
1978 1978 __ addptr(rsi, j);
1979 1979 __ dispatch_only(vtos);
1980 1980
1981 1981 // default case -> j = default offset
1982 1982 __ bind(default_case);
1983 1983 __ profile_switch_default(i);
1984 1984 __ movl(j, Address(array, -2*wordSize));
1985 1985 __ bswapl(j);
1986 1986 LP64_ONLY(__ movslq(j, j));
1987 1987 __ restore_bcp();
1988 1988 __ restore_locals(); // restore rdi
1989 1989 __ load_unsigned_byte(rbx, Address(rsi, j, Address::times_1));
1990 1990 __ addptr(rsi, j);
1991 1991 __ dispatch_only(vtos);
1992 1992 }
1993 1993
1994 1994
1995 1995 void TemplateTable::_return(TosState state) {
1996 1996 transition(state, state);
1997 1997 assert(_desc->calls_vm(), "inconsistent calls_vm information"); // call in remove_activation
1998 1998
1999 1999 if (_desc->bytecode() == Bytecodes::_return_register_finalizer) {
2000 2000 assert(state == vtos, "only valid state");
2001 2001 __ movptr(rax, aaddress(0));
2002 2002 __ movptr(rdi, Address(rax, oopDesc::klass_offset_in_bytes()));
2003 2003 __ movl(rdi, Address(rdi, Klass::access_flags_offset_in_bytes() + sizeof(oopDesc)));
2004 2004 __ testl(rdi, JVM_ACC_HAS_FINALIZER);
2005 2005 Label skip_register_finalizer;
2006 2006 __ jcc(Assembler::zero, skip_register_finalizer);
2007 2007
2008 2008 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::register_finalizer), rax);
2009 2009
2010 2010 __ bind(skip_register_finalizer);
2011 2011 }
2012 2012
2013 2013 __ remove_activation(state, rsi);
2014 2014 __ jmp(rsi);
2015 2015 }
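
// Editorial sketch: the _return_register_finalizer case above performs,
// in effect:
//
//   oop receiver = local(0);                            // 'this'
//   if (receiver->klass()->access_flags() & JVM_ACC_HAS_FINALIZER) {
//     InterpreterRuntime::register_finalizer(receiver);
//   }
//   remove_activation(state);                           // then jmp(rsi)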
2016 2016
2017 2017
2018 2018 // ----------------------------------------------------------------------------
2019 2019 // Volatile variables demand their effects be made known to all CPUs in
2020 2020 // order. Store buffers on most chips allow reads & writes to reorder; the
2021 2021 // JMM's ReadAfterWrite.java test fails in -Xint mode without some kind of
2022 2022 // memory barrier (i.e., it's not sufficient that the interpreter does not
2023 2023 // reorder volatile references, the hardware also must not reorder them).
2024 2024 //
2025 2025 // According to the new Java Memory Model (JMM):
2026 2026 // (1) All volatiles are serialized with respect to each other.
2027 2027 // ALSO reads & writes act as acquire & release, so:
2028 2028 // (2) A read cannot let unrelated NON-volatile memory refs that happen after
2029 2029 // the read float up to before the read. It's OK for non-volatile memory refs
2030 2030 // that happen before the volatile read to float down below it.
2031 2031 // (3) Similarly, a volatile write cannot let unrelated NON-volatile memory refs
2032 2032 // that happen BEFORE the write float down to after the write. It's OK for
2033 2033 // non-volatile memory refs that happen after the volatile write to float up
2034 2034 // before it.
2035 2035 //
2036 2036 // We only put in barriers around volatile refs (they are expensive), not
2037 2037 // _between_ memory refs (that would require us to track the flavor of the
2038 2038 // previous memory refs). Requirements (2) and (3) require some barriers
2039 2039 // before volatile stores and after volatile loads. These nearly cover
2040 2040 // requirement (1) but miss the volatile-store-volatile-load case. This final
2041 2041 // case is placed after volatile-stores although it could just as well go
2042 2042 // before volatile-loads.
2043 2043 void TemplateTable::volatile_barrier(Assembler::Membar_mask_bits order_constraint ) {
2044 2044 // Helper function to insert a memory barrier for volatile accesses
2045 2045 if( !os::is_MP() ) return; // Not needed on single CPU
2046 2046 __ membar(order_constraint);
2047 2047 }
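
// Editorial sketch of the placement policy above. On IA-32 only the
// StoreLoad flavor requires an actual instruction; the other flavors
// are no-ops under the x86 memory model, which is why this file only
// emits barriers after volatile stores.
//
//   jint load_volatile(volatile jint* f) {
//     jint v = *f;
//     // membar(LoadLoad|LoadStore): no-op on x86
//     return v;
//   }
//   void store_volatile(volatile jint* f, jint v) {
//     // membar(LoadStore|StoreStore): no-op on x86
//     *f = v;
//     membar(StoreLoad);               // the one barrier x86 really needs
//   }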
2048 2048
2049 2049 void TemplateTable::resolve_cache_and_index(int byte_no,
2050 2050 Register result,
2051 2051 Register Rcache,
2052 2052 Register index,
2053 2053 size_t index_size) {
2054 2054 Register temp = rbx;
2055 2055
2056 2056 assert_different_registers(result, Rcache, index, temp);
2057 2057
2058 2058 Label resolved;
2059 2059 __ get_cache_and_index_at_bcp(Rcache, index, 1, index_size);
2060 2060 if (byte_no == f1_oop) {
2061 2061 // We are resolved if the f1 field contains a non-null object (CallSite, etc.)
2062 2062 // This kind of CP cache entry does not need to match the flags byte, because
2063 2063 // there is a 1-1 relation between bytecode type and CP entry type.
2064 2064 assert(result != noreg, ""); //else do cmpptr(Address(...), (int32_t) NULL_WORD)
2065 2065 __ movptr(result, Address(Rcache, index, Address::times_ptr, constantPoolCacheOopDesc::base_offset() + ConstantPoolCacheEntry::f1_offset()));
2066 2066 __ testptr(result, result);
2067 2067 __ jcc(Assembler::notEqual, resolved);
2068 2068 } else {
2069 2069 assert(byte_no == f1_byte || byte_no == f2_byte, "byte_no out of range");
2070 2070 assert(result == noreg, ""); //else change code for setting result
2071 2071 const int shift_count = (1 + byte_no)*BitsPerByte;
2072 2072 __ movl(temp, Address(Rcache, index, Address::times_4, constantPoolCacheOopDesc::base_offset() + ConstantPoolCacheEntry::indices_offset()));
2073 2073 __ shrl(temp, shift_count);
2074 2074 // have we resolved this bytecode?
2075 2075 __ andl(temp, 0xFF);
2076 2076 __ cmpl(temp, (int)bytecode());
2077 2077 __ jcc(Assembler::equal, resolved);
2078 2078 }
2079 2079
2080 2080 // resolve first time through
2081 2081 address entry;
2082 2082 switch (bytecode()) {
2083 2083 case Bytecodes::_getstatic : // fall through
2084 2084 case Bytecodes::_putstatic : // fall through
2085 2085 case Bytecodes::_getfield : // fall through
2086 2086 case Bytecodes::_putfield : entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_get_put); break;
2087 2087 case Bytecodes::_invokevirtual : // fall through
2088 2088 case Bytecodes::_invokespecial : // fall through
2089 2089 case Bytecodes::_invokestatic : // fall through
2090 2090 case Bytecodes::_invokeinterface: entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_invoke); break;
2091 2091 case Bytecodes::_invokedynamic : entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_invokedynamic); break;
2092 2092 case Bytecodes::_fast_aldc : entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_ldc); break;
2093 2093 case Bytecodes::_fast_aldc_w : entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_ldc); break;
2094 2094 default : ShouldNotReachHere(); break;
2095 2095 }
2096 2096 __ movl(temp, (int)bytecode());
2097 2097 __ call_VM(noreg, entry, temp);
2098 2098 // Update registers with resolved info
2099 2099 __ get_cache_and_index_at_bcp(Rcache, index, 1, index_size);
2100 2100 if (result != noreg)
2101 2101 __ movptr(result, Address(Rcache, index, Address::times_ptr, constantPoolCacheOopDesc::base_offset() + ConstantPoolCacheEntry::f1_offset()));
2102 2102 __ bind(resolved);
2103 2103 }
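
// Editorial sketch: what resolve_cache_and_index does, in pseudocode.
//
//   cache_entry* e = cp_cache_entry_at_bcp();
//   if (!is_resolved(e, bytecode())) {       // f1 null / bytecode mismatch
//     call_VM(resolver_for(bytecode()));     // resolve_get_put etc.
//     e = cp_cache_entry_at_bcp();           // re-fetch after the call
//   }
//   // for f1_oop entries, 'result' now holds the resolved oop (f1)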
2104 2104
2105 2105
2106 2106 // The cache and index registers must be set before call
2107 2107 void TemplateTable::load_field_cp_cache_entry(Register obj,
2108 2108 Register cache,
2109 2109 Register index,
2110 2110 Register off,
2111 2111 Register flags,
2112 2112 bool is_static = false) {
2113 2113 assert_different_registers(cache, index, flags, off);
2114 2114
2115 2115 ByteSize cp_base_offset = constantPoolCacheOopDesc::base_offset();
2116 2116 // Field offset
2117 2117 __ movptr(off, Address(cache, index, Address::times_ptr,
2118 2118 in_bytes(cp_base_offset + ConstantPoolCacheEntry::f2_offset())));
2119 2119 // Flags
2120 2120 __ movl(flags, Address(cache, index, Address::times_ptr,
2121 2121 in_bytes(cp_base_offset + ConstantPoolCacheEntry::flags_offset())));
2122 2122
2123 2123 // klass overwrite: for static fields, obj is loaded with the holder klass (f1)
2124 2124 if (is_static) {
2125 2125 __ movptr(obj, Address(cache, index, Address::times_ptr,
2126 2126 in_bytes(cp_base_offset + ConstantPoolCacheEntry::f1_offset())));
2127 2127 }
2128 2128 }
2129 2129
2130 2130 void TemplateTable::load_invoke_cp_cache_entry(int byte_no,
2131 2131 Register method,
2132 2132 Register itable_index,
2133 2133 Register flags,
2134 2134 bool is_invokevirtual,
2135 2135 bool is_invokevfinal /*unused*/,
2136 2136 bool is_invokedynamic) {
2137 2137 // setup registers
2138 2138 const Register cache = rcx;
2139 2139 const Register index = rdx;
2140 2140 assert_different_registers(method, flags);
2141 2141 assert_different_registers(method, cache, index);
2142 2142 assert_different_registers(itable_index, flags);
2143 2143 assert_different_registers(itable_index, cache, index);
2144 2144 // determine constant pool cache field offsets
2145 2145 const int method_offset = in_bytes(
2146 2146 constantPoolCacheOopDesc::base_offset() +
2147 2147 (is_invokevirtual
2148 2148 ? ConstantPoolCacheEntry::f2_offset()
2149 2149 : ConstantPoolCacheEntry::f1_offset()
2150 2150 )
2151 2151 );
2152 2152 const int flags_offset = in_bytes(constantPoolCacheOopDesc::base_offset() +
2153 2153 ConstantPoolCacheEntry::flags_offset());
2154 2154 // access constant pool cache fields
2155 2155 const int index_offset = in_bytes(constantPoolCacheOopDesc::base_offset() +
2156 2156 ConstantPoolCacheEntry::f2_offset());
2157 2157
2158 2158 if (byte_no == f1_oop) {
2159 2159 // Resolved f1_oop goes directly into 'method' register.
2160 2160 assert(is_invokedynamic, "");
2161 2161 resolve_cache_and_index(byte_no, method, cache, index, sizeof(u4));
2162 2162 } else {
2163 2163 resolve_cache_and_index(byte_no, noreg, cache, index, sizeof(u2));
2164 2164 __ movptr(method, Address(cache, index, Address::times_ptr, method_offset));
2165 2165 }
2166 2166 if (itable_index != noreg) {
2167 2167 __ movptr(itable_index, Address(cache, index, Address::times_ptr, index_offset));
2168 2168 }
2169 2169 __ movl(flags, Address(cache, index, Address::times_ptr, flags_offset));
2170 2170 }
2171 2171
2172 2172
2173 2173 // The cache and index registers are expected to be set before the call.
2174 2174 // Correct values of the cache and index registers are preserved.
2175 2175 void TemplateTable::jvmti_post_field_access(Register cache,
2176 2176 Register index,
2177 2177 bool is_static,
2178 2178 bool has_tos) {
2179 2179 if (JvmtiExport::can_post_field_access()) {
2180 2180 // Check to see if a field access watch has been set before we take
2181 2181 // the time to call into the VM.
2182 2182 Label L1;
2183 2183 assert_different_registers(cache, index, rax);
2184 2184 __ mov32(rax, ExternalAddress((address) JvmtiExport::get_field_access_count_addr()));
2185 2185 __ testl(rax,rax);
2186 2186 __ jcc(Assembler::zero, L1);
2187 2187
2188 2188 // cache entry pointer
2189 2189 __ addptr(cache, in_bytes(constantPoolCacheOopDesc::base_offset()));
2190 2190 __ shll(index, LogBytesPerWord);
2191 2191 __ addptr(cache, index);
2192 2192 if (is_static) {
2193 2193 __ xorptr(rax, rax); // NULL object reference
2194 2194 } else {
2195 2195 __ pop(atos); // Get the object
2196 2196 __ verify_oop(rax);
2197 2197 __ push(atos); // Restore stack state
2198 2198 }
2199 2199 // rax,: object pointer or NULL
2200 2200 // cache: cache entry pointer
2201 2201 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_field_access),
2202 2202 rax, cache);
2203 2203 __ get_cache_and_index_at_bcp(cache, index, 1);
2204 2204 __ bind(L1);
2205 2205 }
2206 2206 }
2207 2207
2208 2208 void TemplateTable::pop_and_check_object(Register r) {
2209 2209 __ pop_ptr(r);
2210 2210 __ null_check(r); // for field access must check obj.
2211 2211 __ verify_oop(r);
2212 2212 }
2213 2213
2214 2214 void TemplateTable::getfield_or_static(int byte_no, bool is_static) {
2215 2215 transition(vtos, vtos);
2216 2216
2217 2217 const Register cache = rcx;
2218 2218 const Register index = rdx;
2219 2219 const Register obj = rcx;
2220 2220 const Register off = rbx;
2221 2221 const Register flags = rax;
2222 2222
2223 2223 resolve_cache_and_index(byte_no, noreg, cache, index, sizeof(u2));
2224 2224 jvmti_post_field_access(cache, index, is_static, false);
2225 2225 load_field_cp_cache_entry(obj, cache, index, off, flags, is_static);
2226 2226
2227 2227 if (!is_static) pop_and_check_object(obj);
2228 2228
2229 2229 const Address lo(obj, off, Address::times_1, 0*wordSize);
2230 2230 const Address hi(obj, off, Address::times_1, 1*wordSize);
2231 2231
2232 2232 Label Done, notByte, notInt, notShort, notChar, notLong, notFloat, notObj, notDouble;
2233 2233
2234 2234 __ shrl(flags, ConstantPoolCacheEntry::tosBits);
2235 2235 assert(btos == 0, "change code, btos != 0");
2236 2236 // btos
2237 2237 __ andptr(flags, 0x0f);
2238 2238 __ jcc(Assembler::notZero, notByte);
2239 2239
2240 2240 __ load_signed_byte(rax, lo );
2241 2241 __ push(btos);
2242 2242 // Rewrite bytecode to be faster
2243 2243 if (!is_static) {
2244 2244 patch_bytecode(Bytecodes::_fast_bgetfield, rcx, rbx);
2245 2245 }
2246 2246 __ jmp(Done);
2247 2247
2248 2248 __ bind(notByte);
2249 2249 // itos
2250 2250 __ cmpl(flags, itos );
2251 2251 __ jcc(Assembler::notEqual, notInt);
2252 2252
2253 2253 __ movl(rax, lo );
2254 2254 __ push(itos);
2255 2255 // Rewrite bytecode to be faster
2256 2256 if (!is_static) {
2257 2257 patch_bytecode(Bytecodes::_fast_igetfield, rcx, rbx);
2258 2258 }
2259 2259 __ jmp(Done);
2260 2260
2261 2261 __ bind(notInt);
2262 2262 // atos
2263 2263 __ cmpl(flags, atos );
2264 2264 __ jcc(Assembler::notEqual, notObj);
2265 2265
2266 2266 __ movl(rax, lo );
2267 2267 __ push(atos);
2268 2268 if (!is_static) {
2269 2269 patch_bytecode(Bytecodes::_fast_agetfield, rcx, rbx);
2270 2270 }
2271 2271 __ jmp(Done);
2272 2272
2273 2273 __ bind(notObj);
2274 2274 // ctos
2275 2275 __ cmpl(flags, ctos );
2276 2276 __ jcc(Assembler::notEqual, notChar);
2277 2277
2278 2278 __ load_unsigned_short(rax, lo );
2279 2279 __ push(ctos);
2280 2280 if (!is_static) {
2281 2281 patch_bytecode(Bytecodes::_fast_cgetfield, rcx, rbx);
2282 2282 }
2283 2283 __ jmp(Done);
2284 2284
2285 2285 __ bind(notChar);
2286 2286 // stos
2287 2287 __ cmpl(flags, stos );
2288 2288 __ jcc(Assembler::notEqual, notShort);
2289 2289
2290 2290 __ load_signed_short(rax, lo );
2291 2291 __ push(stos);
2292 2292 if (!is_static) {
2293 2293 patch_bytecode(Bytecodes::_fast_sgetfield, rcx, rbx);
2294 2294 }
2295 2295 __ jmp(Done);
2296 2296
2297 2297 __ bind(notShort);
2298 2298 // ltos
2299 2299 __ cmpl(flags, ltos );
2300 2300 __ jcc(Assembler::notEqual, notLong);
2301 2301
2302 2302 // Generate code as if volatile. There just aren't enough registers to
2303 2303 // save that information and this code is faster than the test.
2304 2304 __ fild_d(lo); // Must load atomically
2305 2305 __ subptr(rsp,2*wordSize); // Make space for store
2306 2306 __ fistp_d(Address(rsp,0));
2307 2307 __ pop(rax);
2308 2308 __ pop(rdx);
2309 2309
2310 2310 __ push(ltos);
2311 2311 // Don't rewrite to _fast_lgetfield for potential volatile case.
2312 2312 __ jmp(Done);
2313 2313
2314 2314 __ bind(notLong);
2315 2315 // ftos
2316 2316 __ cmpl(flags, ftos );
2317 2317 __ jcc(Assembler::notEqual, notFloat);
2318 2318
2319 2319 __ fld_s(lo);
2320 2320 __ push(ftos);
2321 2321 if (!is_static) {
2322 2322 patch_bytecode(Bytecodes::_fast_fgetfield, rcx, rbx);
2323 2323 }
2324 2324 __ jmp(Done);
2325 2325
2326 2326 __ bind(notFloat);
2327 2327 // dtos
2328 2328 __ cmpl(flags, dtos );
2329 2329 __ jcc(Assembler::notEqual, notDouble);
2330 2330
2331 2331 __ fld_d(lo);
2332 2332 __ push(dtos);
2333 2333 if (!is_static) {
2334 2334 patch_bytecode(Bytecodes::_fast_dgetfield, rcx, rbx);
2335 2335 }
2336 2336 __ jmpb(Done);
2337 2337
2338 2338 __ bind(notDouble);
2339 2339
2340 2340 __ stop("Bad state");
2341 2341
2342 2342 __ bind(Done);
2343 2343 // Doug Lea believes this is not needed with current Sparcs (TSO) and Intel (PSO).
2344 2344 // volatile_barrier( );
2345 2345 }
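
// Editorial sketch: the compare chain above is effectively a switch on
// the tos state held in the low flag bits after the shift. push_i,
// push_a, push_l, and atomic_load64 are illustrative names.
//
//   switch ((flags >> tosBits) & 0x0f) {
//     case btos: push_i(*(jbyte*)(obj + off)); break;
//     case itos: push_i(*(jint*) (obj + off)); break;
//     case atos: push_a(*(oop*)  (obj + off)); break;
//     // ctos, stos, ftos, dtos analogously
//     case ltos: push_l(atomic_load64(obj + off)); break;  // via x87
//   }
//   if (!is_static) patch_bytecode(fast_variant_of(bytecode()));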
2346 2346
2347 2347
2348 2348 void TemplateTable::getfield(int byte_no) {
2349 2349 getfield_or_static(byte_no, false);
2350 2350 }
2351 2351
2352 2352
2353 2353 void TemplateTable::getstatic(int byte_no) {
2354 2354 getfield_or_static(byte_no, true);
2355 2355 }
2356 2356
2357 2357 // The cache and index registers are expected to be set before the call.
2358 2358 // The function may destroy various registers, just not the cache and index registers.
2359 2359 void TemplateTable::jvmti_post_field_mod(Register cache, Register index, bool is_static) {
2360 2360
2361 2361 ByteSize cp_base_offset = constantPoolCacheOopDesc::base_offset();
2362 2362
2363 2363 if (JvmtiExport::can_post_field_modification()) {
2364 2364 // Check to see if a field modification watch has been set before we take
2365 2365 // the time to call into the VM.
2366 2366 Label L1;
2367 2367 assert_different_registers(cache, index, rax);
2368 2368 __ mov32(rax, ExternalAddress((address)JvmtiExport::get_field_modification_count_addr()));
2369 2369 __ testl(rax, rax);
2370 2370 __ jcc(Assembler::zero, L1);
2371 2371
2372 2372 // The cache and index registers have already been set. This
2373 2373 // call could therefore be eliminated, but then the cache and
2374 2374 // index registers would have to be used consistently below.
2375 2375 __ get_cache_and_index_at_bcp(rax, rdx, 1);
2376 2376
2377 2377 if (is_static) {
2378 2378 // Life is simple. Null out the object pointer.
2379 2379 __ xorptr(rbx, rbx);
2380 2380 } else {
2381 2381 // Life is harder. The stack holds the value on top, followed by the object.
2382 2382 // We don't know the size of the value, though; it could be one or two words
2383 2383 // depending on its type. As a result, we must find the type to determine where
2384 2384 // the object is.
2385 2385 Label two_word, valsize_known;
2386 2386 __ movl(rcx, Address(rax, rdx, Address::times_ptr, in_bytes(cp_base_offset +
2387 2387 ConstantPoolCacheEntry::flags_offset())));
2388 2388 __ mov(rbx, rsp);
2389 2389 __ shrl(rcx, ConstantPoolCacheEntry::tosBits);
2390 2390 // Make sure we don't need to mask rcx for tosBits after the above shift
2391 2391 ConstantPoolCacheEntry::verify_tosBits();
2392 2392 __ cmpl(rcx, ltos);
2393 2393 __ jccb(Assembler::equal, two_word);
2394 2394 __ cmpl(rcx, dtos);
2395 2395 __ jccb(Assembler::equal, two_word);
2396 2396 __ addptr(rbx, Interpreter::expr_offset_in_bytes(1)); // one word jvalue (not ltos, dtos)
2397 2397 __ jmpb(valsize_known);
2398 2398
2399 2399 __ bind(two_word);
2400 2400 __ addptr(rbx, Interpreter::expr_offset_in_bytes(2)); // two words jvalue
2401 2401
2402 2402 __ bind(valsize_known);
2403 2403 // setup object pointer
2404 2404 __ movptr(rbx, Address(rbx, 0));
2405 2405 }
2406 2406 // cache entry pointer
2407 2407 __ addptr(rax, in_bytes(cp_base_offset));
2408 2408 __ shll(rdx, LogBytesPerWord);
2409 2409 __ addptr(rax, rdx);
2410 2410 // object (tos)
2411 2411 __ mov(rcx, rsp);
2412 2412 // rbx,: object pointer set up above (NULL if static)
2413 2413 // rax,: cache entry pointer
2414 2414 // rcx: jvalue object on the stack
2415 2415 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_field_modification),
2416 2416 rbx, rax, rcx);
2417 2417 __ get_cache_and_index_at_bcp(cache, index, 1);
2418 2418 __ bind(L1);
2419 2419 }
2420 2420 }
2421 2421
2422 2422
2423 2423 void TemplateTable::putfield_or_static(int byte_no, bool is_static) {
2424 2424 transition(vtos, vtos);
2425 2425
2426 2426 const Register cache = rcx;
2427 2427 const Register index = rdx;
2428 2428 const Register obj = rcx;
2429 2429 const Register off = rbx;
2430 2430 const Register flags = rax;
2431 2431
2432 2432 resolve_cache_and_index(byte_no, noreg, cache, index, sizeof(u2));
2433 2433 jvmti_post_field_mod(cache, index, is_static);
2434 2434 load_field_cp_cache_entry(obj, cache, index, off, flags, is_static);
2435 2435
2436 2436 // Doug Lea believes this is not needed with current Sparcs (TSO) and Intel (PSO).
2437 2437 // volatile_barrier( );
2438 2438
2439 2439 Label notVolatile, Done;
2440 2440 __ movl(rdx, flags);
2441 2441 __ shrl(rdx, ConstantPoolCacheEntry::volatileField);
2442 2442 __ andl(rdx, 0x1);
2443 2443
2444 2444 // field addresses
2445 2445 const Address lo(obj, off, Address::times_1, 0*wordSize);
2446 2446 const Address hi(obj, off, Address::times_1, 1*wordSize);
2447 2447
2448 2448 Label notByte, notInt, notShort, notChar, notLong, notFloat, notObj, notDouble;
2449 2449
2450 2450 __ shrl(flags, ConstantPoolCacheEntry::tosBits);
2451 2451 assert(btos == 0, "change code, btos != 0");
2452 2452 // btos
2453 2453 __ andl(flags, 0x0f);
2454 2454 __ jcc(Assembler::notZero, notByte);
2455 2455
2456 2456 __ pop(btos);
2457 2457 if (!is_static) pop_and_check_object(obj);
2458 2458 __ movb(lo, rax );
2459 2459 if (!is_static) {
2460 2460 patch_bytecode(Bytecodes::_fast_bputfield, rcx, rbx);
2461 2461 }
2462 2462 __ jmp(Done);
2463 2463
2464 2464 __ bind(notByte);
2465 2465 // itos
2466 2466 __ cmpl(flags, itos );
2467 2467 __ jcc(Assembler::notEqual, notInt);
2468 2468
2469 2469 __ pop(itos);
2470 2470 if (!is_static) pop_and_check_object(obj);
2471 2471
2472 2472 __ movl(lo, rax );
2473 2473 if (!is_static) {
2474 2474 patch_bytecode(Bytecodes::_fast_iputfield, rcx, rbx);
2475 2475 }
2476 2476 __ jmp(Done);
2477 2477
2478 2478 __ bind(notInt);
2479 2479 // atos
2480 2480 __ cmpl(flags, atos );
2481 2481 __ jcc(Assembler::notEqual, notObj);
2482 2482
2483 2483 __ pop(atos);
2484 2484 if (!is_static) pop_and_check_object(obj);
2485 2485
2486 2486 do_oop_store(_masm, lo, rax, _bs->kind(), false);
2487 2487
2488 2488 if (!is_static) {
2489 2489 patch_bytecode(Bytecodes::_fast_aputfield, rcx, rbx);
2490 2490 }
2491 2491
2492 2492 __ jmp(Done);
2493 2493
2494 2494 __ bind(notObj);
2495 2495 // ctos
2496 2496 __ cmpl(flags, ctos );
2497 2497 __ jcc(Assembler::notEqual, notChar);
2498 2498
2499 2499 __ pop(ctos);
2500 2500 if (!is_static) pop_and_check_object(obj);
2501 2501 __ movw(lo, rax );
2502 2502 if (!is_static) {
2503 2503 patch_bytecode(Bytecodes::_fast_cputfield, rcx, rbx);
2504 2504 }
2505 2505 __ jmp(Done);
2506 2506
2507 2507 __ bind(notChar);
2508 2508 // stos
2509 2509 __ cmpl(flags, stos );
2510 2510 __ jcc(Assembler::notEqual, notShort);
2511 2511
2512 2512 __ pop(stos);
2513 2513 if (!is_static) pop_and_check_object(obj);
2514 2514 __ movw(lo, rax );
2515 2515 if (!is_static) {
2516 2516 patch_bytecode(Bytecodes::_fast_sputfield, rcx, rbx);
2517 2517 }
2518 2518 __ jmp(Done);
2519 2519
2520 2520 __ bind(notShort);
2521 2521 // ltos
2522 2522 __ cmpl(flags, ltos );
2523 2523 __ jcc(Assembler::notEqual, notLong);
2524 2524
2525 2525 Label notVolatileLong;
2526 2526 __ testl(rdx, rdx);
2527 2527 __ jcc(Assembler::zero, notVolatileLong);
2528 2528
2529 2529 __ pop(ltos); // overwrites rdx, do this after testing volatile.
2530 2530 if (!is_static) pop_and_check_object(obj);
2531 2531
2532 2532 // Replace with real volatile test
2533 2533 __ push(rdx);
2534 2534 __ push(rax); // Must update atomically with FIST
2535 2535 __ fild_d(Address(rsp,0)); // So load into FPU register
2536 2536 __ fistp_d(lo); // and put into memory atomically
2537 2537 __ addptr(rsp, 2*wordSize);
2538 2538 // volatile_barrier();
2539 2539 volatile_barrier(Assembler::Membar_mask_bits(Assembler::StoreLoad |
2540 2540 Assembler::StoreStore));
2541 2541 // Don't rewrite volatile version
2542 2542 __ jmp(notVolatile);
2543 2543
2544 2544 __ bind(notVolatileLong);
2545 2545
2546 2546 __ pop(ltos); // overwrites rdx
2547 2547 if (!is_static) pop_and_check_object(obj);
2548 2548 NOT_LP64(__ movptr(hi, rdx));
2549 2549 __ movptr(lo, rax);
2550 2550 if (!is_static) {
2551 2551 patch_bytecode(Bytecodes::_fast_lputfield, rcx, rbx);
2552 2552 }
2553 2553 __ jmp(notVolatile);
2554 2554
2555 2555 __ bind(notLong);
2556 2556 // ftos
2557 2557 __ cmpl(flags, ftos );
2558 2558 __ jcc(Assembler::notEqual, notFloat);
2559 2559
2560 2560 __ pop(ftos);
2561 2561 if (!is_static) pop_and_check_object(obj);
2562 2562 __ fstp_s(lo);
2563 2563 if (!is_static) {
2564 2564 patch_bytecode(Bytecodes::_fast_fputfield, rcx, rbx);
2565 2565 }
2566 2566 __ jmp(Done);
2567 2567
2568 2568 __ bind(notFloat);
2569 2569 // dtos
2570 2570 __ cmpl(flags, dtos );
2571 2571 __ jcc(Assembler::notEqual, notDouble);
2572 2572
2573 2573 __ pop(dtos);
2574 2574 if (!is_static) pop_and_check_object(obj);
2575 2575 __ fstp_d(lo);
2576 2576 if (!is_static) {
2577 2577 patch_bytecode(Bytecodes::_fast_dputfield, rcx, rbx);
2578 2578 }
2579 2579 __ jmp(Done);
2580 2580
2581 2581 __ bind(notDouble);
2582 2582
2583 2583 __ stop("Bad state");
2584 2584
2585 2585 __ bind(Done);
2586 2586
2587 2587 // Check for volatile store
2588 2588 __ testl(rdx, rdx);
2589 2589 __ jcc(Assembler::zero, notVolatile);
2590 2590 volatile_barrier(Assembler::Membar_mask_bits(Assembler::StoreLoad |
2591 2591 Assembler::StoreStore));
2592 2592 __ bind(notVolatile);
2593 2593 }
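
// Editorial sketch: the volatile-long path above obtains an atomic
// 64-bit store on IA-32 without SSE by bouncing the value through the
// x87 FPU, since fild/fistp move all 8 bytes in one memory access:
//
//   push(rdx); push(rax);          // jlong now contiguous at [rsp]
//   fild_d(Address(rsp, 0));       // 8-byte load into an FPU register
//   fistp_d(field);                // single 8-byte store to the field
//   addptr(rsp, 2 * wordSize);
//   volatile_barrier(StoreLoad | StoreStore);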
2594 2594
2595 2595
2596 2596 void TemplateTable::putfield(int byte_no) {
2597 2597 putfield_or_static(byte_no, false);
2598 2598 }
2599 2599
2600 2600
2601 2601 void TemplateTable::putstatic(int byte_no) {
2602 2602 putfield_or_static(byte_no, true);
2603 2603 }
2604 2604
2605 2605 void TemplateTable::jvmti_post_fast_field_mod() {
2606 2606 if (JvmtiExport::can_post_field_modification()) {
2607 2607 // Check to see if a field modification watch has been set before we take
2608 2608 // the time to call into the VM.
2609 2609 Label L2;
2610 2610 __ mov32(rcx, ExternalAddress((address)JvmtiExport::get_field_modification_count_addr()));
2611 2611 __ testl(rcx,rcx);
2612 2612 __ jcc(Assembler::zero, L2);
2613 2613 __ pop_ptr(rbx); // copy the object pointer from tos
2614 2614 __ verify_oop(rbx);
2615 2615 __ push_ptr(rbx); // put the object pointer back on tos
2616 2616 __ subptr(rsp, sizeof(jvalue)); // add space for a jvalue object
2617 2617 __ mov(rcx, rsp);
2618 2618 __ push_ptr(rbx); // save object pointer so we can steal rbx,
2619 2619 __ xorptr(rbx, rbx);
2620 2620 const Address lo_value(rcx, rbx, Address::times_1, 0*wordSize);
2621 2621 const Address hi_value(rcx, rbx, Address::times_1, 1*wordSize);
2622 2622 switch (bytecode()) { // load values into the jvalue object
2623 2623 case Bytecodes::_fast_bputfield: __ movb(lo_value, rax); break;
2624 2624 case Bytecodes::_fast_sputfield: __ movw(lo_value, rax); break;
2625 2625 case Bytecodes::_fast_cputfield: __ movw(lo_value, rax); break;
2626 2626 case Bytecodes::_fast_iputfield: __ movl(lo_value, rax); break;
2627 2627 case Bytecodes::_fast_lputfield:
2628 2628 NOT_LP64(__ movptr(hi_value, rdx));
2629 2629 __ movptr(lo_value, rax);
2630 2630 break;
2631 2631
2632 2632 // need to call fld_s() after fstp_s() to restore the value for below
2633 2633 case Bytecodes::_fast_fputfield: __ fstp_s(lo_value); __ fld_s(lo_value); break;
2634 2634
2635 2635 // need to call fld_d() after fstp_d() to restore the value for below
2636 2636 case Bytecodes::_fast_dputfield: __ fstp_d(lo_value); __ fld_d(lo_value); break;
2637 2637
2638 2638 // since rcx is not an object we don't call store_check() here
2639 2639 case Bytecodes::_fast_aputfield: __ movptr(lo_value, rax); break;
2640 2640
2641 2641 default: ShouldNotReachHere();
2642 2642 }
2643 2643 __ pop_ptr(rbx); // restore copy of object pointer
2644 2644
2645 2645 // Save rax, and sometimes rdx because call_VM() will clobber them,
2646 2646 // then use them for JVM/DI purposes
2647 2647 __ push(rax);
2648 2648 if (bytecode() == Bytecodes::_fast_lputfield) __ push(rdx);
2649 2649 // access constant pool cache entry
2650 2650 __ get_cache_entry_pointer_at_bcp(rax, rdx, 1);
2651 2651 __ verify_oop(rbx);
2652 2652 // rbx,: object pointer copied above
2653 2653 // rax,: cache entry pointer
2654 2654 // rcx: jvalue object on the stack
2655 2655 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_field_modification), rbx, rax, rcx);
2656 2656 if (bytecode() == Bytecodes::_fast_lputfield) __ pop(rdx); // restore high value
2657 2657 __ pop(rax); // restore lower value
2658 2658 __ addptr(rsp, sizeof(jvalue)); // release jvalue object space
2659 2659 __ bind(L2);
2660 2660 }
2661 2661 }
2662 2662
2663 2663 void TemplateTable::fast_storefield(TosState state) {
2664 2664 transition(state, vtos);
2665 2665
2666 2666 ByteSize base = constantPoolCacheOopDesc::base_offset();
2667 2667
2668 2668 jvmti_post_fast_field_mod();
2669 2669
2670 2670 // access constant pool cache
2671 2671 __ get_cache_and_index_at_bcp(rcx, rbx, 1);
2672 2672
2673 2673 // Test for volatile with rdx; rdx is the tos register for lputfield, so save it first.
2674 2674 if (bytecode() == Bytecodes::_fast_lputfield) __ push(rdx);
2675 2675 __ movl(rdx, Address(rcx, rbx, Address::times_ptr, in_bytes(base +
2676 2676 ConstantPoolCacheEntry::flags_offset())));
2677 2677
2678 2678 // replace index with field offset from cache entry
2679 2679 __ movptr(rbx, Address(rcx, rbx, Address::times_ptr, in_bytes(base + ConstantPoolCacheEntry::f2_offset())));
2680 2680
2681 2681 // Doug Lea believes this is not needed with current Sparcs (TSO) and Intel (PSO).
2682 2682 // volatile_barrier( );
2683 2683
2684 2684 Label notVolatile, Done;
2685 2685 __ shrl(rdx, ConstantPoolCacheEntry::volatileField);
2686 2686 __ andl(rdx, 0x1);
2687 2687 // Check for volatile store
2688 2688 __ testl(rdx, rdx);
2689 2689 __ jcc(Assembler::zero, notVolatile);
2690 2690
2691 2691 if (bytecode() == Bytecodes::_fast_lputfield) __ pop(rdx);
2692 2692
2693 2693 // Get object from stack
2694 2694 pop_and_check_object(rcx);
2695 2695
2696 2696 // field addresses
2697 2697 const Address lo(rcx, rbx, Address::times_1, 0*wordSize);
2698 2698 const Address hi(rcx, rbx, Address::times_1, 1*wordSize);
2699 2699
2700 2700 // access field
2701 2701 switch (bytecode()) {
2702 2702 case Bytecodes::_fast_bputfield: __ movb(lo, rax); break;
2703 2703 case Bytecodes::_fast_sputfield: // fall through
2704 2704 case Bytecodes::_fast_cputfield: __ movw(lo, rax); break;
2705 2705 case Bytecodes::_fast_iputfield: __ movl(lo, rax); break;
2706 2706 case Bytecodes::_fast_lputfield:
2707 2707 NOT_LP64(__ movptr(hi, rdx));
2708 2708 __ movptr(lo, rax);
2709 2709 break;
2710 2710 case Bytecodes::_fast_fputfield: __ fstp_s(lo); break;
2711 2711 case Bytecodes::_fast_dputfield: __ fstp_d(lo); break;
2712 2712 case Bytecodes::_fast_aputfield: {
2713 2713 do_oop_store(_masm, lo, rax, _bs->kind(), false);
2714 2714 break;
2715 2715 }
2716 2716 default:
2717 2717 ShouldNotReachHere();
2718 2718 }
2719 2719
2720 2720 Label done;
2721 2721 volatile_barrier(Assembler::Membar_mask_bits(Assembler::StoreLoad |
2722 2722 Assembler::StoreStore));
2723 2723 // Barriers are so large that a short branch doesn't reach!
2724 2724 __ jmp(done);
2725 2725
2726 2726 // Same code as above, but don't need rdx to test for volatile.
2727 2727 __ bind(notVolatile);
2728 2728
2729 2729 if (bytecode() == Bytecodes::_fast_lputfield) __ pop(rdx);
2730 2730
2731 2731 // Get object from stack
2732 2732 pop_and_check_object(rcx);
2733 2733
2734 2734 // access field
2735 2735 switch (bytecode()) {
2736 2736 case Bytecodes::_fast_bputfield: __ movb(lo, rax); break;
2737 2737 case Bytecodes::_fast_sputfield: // fall through
2738 2738 case Bytecodes::_fast_cputfield: __ movw(lo, rax); break;
2739 2739 case Bytecodes::_fast_iputfield: __ movl(lo, rax); break;
2740 2740 case Bytecodes::_fast_lputfield:
2741 2741 NOT_LP64(__ movptr(hi, rdx));
2742 2742 __ movptr(lo, rax);
2743 2743 break;
2744 2744 case Bytecodes::_fast_fputfield: __ fstp_s(lo); break;
2745 2745 case Bytecodes::_fast_dputfield: __ fstp_d(lo); break;
2746 2746 case Bytecodes::_fast_aputfield: {
2747 2747 do_oop_store(_masm, lo, rax, _bs->kind(), false);
2748 2748 break;
2749 2749 }
2750 2750 default:
2751 2751 ShouldNotReachHere();
2752 2752 }
2753 2753 __ bind(done);
2754 2754 }
2755 2755
2756 2756
2757 2757 void TemplateTable::fast_accessfield(TosState state) {
2758 2758 transition(atos, state);
2759 2759
2760 2760 // do the JVMTI work here to avoid disturbing the register state below
2761 2761 if (JvmtiExport::can_post_field_access()) {
2762 2762 // Check to see if a field access watch has been set before we take
2763 2763 // the time to call into the VM.
2764 2764 Label L1;
2765 2765 __ mov32(rcx, ExternalAddress((address) JvmtiExport::get_field_access_count_addr()));
2766 2766 __ testl(rcx,rcx);
2767 2767 __ jcc(Assembler::zero, L1);
2768 2768 // access constant pool cache entry
2769 2769 __ get_cache_entry_pointer_at_bcp(rcx, rdx, 1);
2770 2770 __ push_ptr(rax); // save object pointer before call_VM() clobbers it
2771 2771 __ verify_oop(rax);
2772 2772 // rax,: object pointer copied above
2773 2773 // rcx: cache entry pointer
2774 2774 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_field_access), rax, rcx);
2775 2775 __ pop_ptr(rax); // restore object pointer
2776 2776 __ bind(L1);
2777 2777 }
2778 2778
2779 2779 // access constant pool cache
2780 2780 __ get_cache_and_index_at_bcp(rcx, rbx, 1);
2781 2781 // replace index with field offset from cache entry
2782 2782 __ movptr(rbx, Address(rcx,
2783 2783 rbx,
2784 2784 Address::times_ptr,
2785 2785 in_bytes(constantPoolCacheOopDesc::base_offset() + ConstantPoolCacheEntry::f2_offset())));
2786 2786
2787 2787
2788 2788 // rax,: object
2789 2789 __ verify_oop(rax);
2790 2790 __ null_check(rax);
2791 2791 // field addresses
2792 2792 const Address lo = Address(rax, rbx, Address::times_1, 0*wordSize);
2793 2793 const Address hi = Address(rax, rbx, Address::times_1, 1*wordSize);
2794 2794
2795 2795 // access field
2796 2796 switch (bytecode()) {
2797 2797 case Bytecodes::_fast_bgetfield: __ movsbl(rax, lo ); break;
2798 2798 case Bytecodes::_fast_sgetfield: __ load_signed_short(rax, lo ); break;
2799 2799 case Bytecodes::_fast_cgetfield: __ load_unsigned_short(rax, lo ); break;
2800 2800 case Bytecodes::_fast_igetfield: __ movl(rax, lo); break;
2801 2801 case Bytecodes::_fast_lgetfield: __ stop("should not be rewritten"); break;
2802 2802 case Bytecodes::_fast_fgetfield: __ fld_s(lo); break;
2803 2803 case Bytecodes::_fast_dgetfield: __ fld_d(lo); break;
2804 2804 case Bytecodes::_fast_agetfield: __ movptr(rax, lo); __ verify_oop(rax); break;
2805 2805 default:
2806 2806 ShouldNotReachHere();
2807 2807 }
2808 2808
2809 2809 // Doug Lea believes this is not needed with current Sparcs(TSO) and Intel(PSO)
2810 2810 // volatile_barrier( );
2811 2811 }
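
// Editorial sketch: fast_accessfield is reached only through bytecode
// rewriting (quickening) performed by getfield_or_static above.
//
//   // first execution of a getfield:
//   resolve_field(); push_value();
//   *bcp = Bytecodes::_fast_igetfield;   // e.g. for an int field
//   // later executions dispatch straight here, skipping resolution
//   // and the tos-state compare chain entirely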
2812 2812
2813 2813 void TemplateTable::fast_xaccess(TosState state) {
2814 2814 transition(vtos, state);
2815 2815 // get receiver
2816 2816 __ movptr(rax, aaddress(0));
2817 2817 // access constant pool cache
2818 2818 __ get_cache_and_index_at_bcp(rcx, rdx, 2);
2819 2819 __ movptr(rbx, Address(rcx,
2820 2820 rdx,
2821 2821 Address::times_ptr,
2822 2822 in_bytes(constantPoolCacheOopDesc::base_offset() + ConstantPoolCacheEntry::f2_offset())));
2823 2823 // make sure exception is reported in correct bcp range (getfield is next instruction)
2824 2824 __ increment(rsi);
2825 2825 __ null_check(rax);
2826 2826 const Address lo = Address(rax, rbx, Address::times_1, 0*wordSize);
2827 2827 if (state == itos) {
2828 2828 __ movl(rax, lo);
2829 2829 } else if (state == atos) {
2830 2830 __ movptr(rax, lo);
2831 2831 __ verify_oop(rax);
2832 2832 } else if (state == ftos) {
2833 2833 __ fld_s(lo);
2834 2834 } else {
2835 2835 ShouldNotReachHere();
2836 2836 }
2837 2837 __ decrement(rsi);
2838 2838 }
2839 2839
2840 2840
2841 2841
2842 2842 //----------------------------------------------------------------------------------------------------
2843 2843 // Calls
2844 2844
2845 2845 void TemplateTable::count_calls(Register method, Register temp) {
2846 2846 // implemented elsewhere
2847 2847 ShouldNotReachHere();
2848 2848 }
2849 2849
2850 2850
2851 2851 void TemplateTable::prepare_invoke(Register method, Register index, int byte_no) {
2852 2852 // determine flags
2853 2853 Bytecodes::Code code = bytecode();
2854 2854 const bool is_invokeinterface = code == Bytecodes::_invokeinterface;
2855 2855 const bool is_invokedynamic = code == Bytecodes::_invokedynamic;
2856 2856 const bool is_invokevirtual = code == Bytecodes::_invokevirtual;
2857 2857 const bool is_invokespecial = code == Bytecodes::_invokespecial;
2858 2858 const bool load_receiver = (code != Bytecodes::_invokestatic && code != Bytecodes::_invokedynamic);
2859 2859 const bool receiver_null_check = is_invokespecial;
2860 2860 const bool save_flags = is_invokeinterface || is_invokevirtual;
2861 2861 // setup registers & access constant pool cache
2862 2862 const Register recv = rcx;
2863 2863 const Register flags = rdx;
2864 2864 assert_different_registers(method, index, recv, flags);
2865 2865
2866 2866 // save 'interpreter return address'
2867 2867 __ save_bcp();
2868 2868
2869 2869 load_invoke_cp_cache_entry(byte_no, method, index, flags, is_invokevirtual, false, is_invokedynamic);
2870 2870
2871 2871 // load receiver if needed (note: no return address pushed yet)
2872 2872 if (load_receiver) {
2873 2873 assert(!is_invokedynamic, "");
2874 2874 __ movl(recv, flags);
2875 2875 __ andl(recv, 0xFF);
2876 2876 // recv count is 0 based?
2877 2877 Address recv_addr(rsp, recv, Interpreter::stackElementScale(), -Interpreter::expr_offset_in_bytes(1));
2878 2878 __ movptr(recv, recv_addr);
2879 2879 __ verify_oop(recv);
2880 2880 }
2881 2881
2882 2882 // do null check if needed
2883 2883 if (receiver_null_check) {
2884 2884 __ null_check(recv);
2885 2885 }
2886 2886
2887 2887 if (save_flags) {
2888 2888 __ mov(rsi, flags);
2889 2889 }
2890 2890
2891 2891 // compute return type
2892 2892 __ shrl(flags, ConstantPoolCacheEntry::tosBits);
2893 2893 // Make sure we don't need to mask flags for tosBits after the above shift
2894 2894 ConstantPoolCacheEntry::verify_tosBits();
2895 2895 // load return address
2896 2896 {
2897 2897 address table_addr;
2898 2898 if (is_invokeinterface || is_invokedynamic)
2899 2899 table_addr = (address)Interpreter::return_5_addrs_by_index_table();
2900 2900 else
2901 2901 table_addr = (address)Interpreter::return_3_addrs_by_index_table();
2902 2902 ExternalAddress table(table_addr);
2903 2903 __ movptr(flags, ArrayAddress(table, Address(noreg, flags, Address::times_ptr)));
2904 2904 }
2905 2905
2906 2906 // push return address
2907 2907 __ push(flags);
2908 2908
2909 2909 // Restore flag value from the constant pool cache, and restore rsi
2910 2910 // for later null checks. rsi is the bytecode pointer
2911 2911 if (save_flags) {
2912 2912 __ mov(flags, rsi);
2913 2913 __ restore_bcp();
2914 2914 }
2915 2915 }
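
// Editorial sketch: the state prepare_invoke establishes, in pseudocode
// (field names follow the CP cache layout used above; stack_at and
// return_entry_for are illustrative).
//
//   method_or_index = cp_cache.f1_or_f2;       // via resolve_cache_and_index
//   flags           = cp_cache.flags;
//   if (load_receiver)
//     recv = stack_at(flags & 0xFF);           // parameter slot count
//   if (is_invokespecial) null_check(recv);
//   push(return_entry_for(flags >> tosBits));  // tos-indexed re-entry address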
2916 2916
2917 2917
2918 2918 void TemplateTable::invokevirtual_helper(Register index, Register recv,
2919 2919 Register flags) {
2920 2920
2921 2921 // Uses temporary registers rax, rdx
2922 2922 assert_different_registers(index, recv, rax, rdx);
2923 2923
2924 2924 // Test for an invoke of a final method
2925 2925 Label notFinal;
2926 2926 __ movl(rax, flags);
2927 2927 __ andl(rax, (1 << ConstantPoolCacheEntry::vfinalMethod));
2928 2928 __ jcc(Assembler::zero, notFinal);
2929 2929
2930 2930 Register method = index; // method must be rbx,
2931 2931 assert(method == rbx, "methodOop must be rbx, for interpreter calling convention");
2932 2932
2933 2933 // do the call - the index is actually the method to call
2934 2934 __ verify_oop(method);
2935 2935
2936 2936 // It's final, need a null check here!
2937 2937 __ null_check(recv);
2938 2938
2939 2939 // profile this call
2940 2940 __ profile_final_call(rax);
2941 2941
2942 2942 __ jump_from_interpreted(method, rax);
2943 2943
2944 2944 __ bind(notFinal);
2945 2945
2946 2946 // get receiver klass
2947 2947 __ null_check(recv, oopDesc::klass_offset_in_bytes());
2948 2948 // Keep recv in rcx; the callee expects it there
2949 2949 __ movptr(rax, Address(recv, oopDesc::klass_offset_in_bytes()));
2950 2950 __ verify_oop(rax);
2951 2951
2952 2952 // profile this call
2953 2953 __ profile_virtual_call(rax, rdi, rdx);
2954 2954
2955 2955 // get target methodOop & entry point
2956 2956 const int base = instanceKlass::vtable_start_offset() * wordSize;
2957 2957 assert(vtableEntry::size() * wordSize == 4, "adjust the scaling in the code below");
2958 2958 __ movptr(method, Address(rax, index, Address::times_ptr, base + vtableEntry::method_offset_in_bytes()));
2959 2959 __ jump_from_interpreted(method, rdx);
2960 2960 }
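
// Editorial sketch: the dispatch implemented above, in pseudocode.
//
//   if (flags & (1 << vfinalMethod)) {
//     method = f2;                          // f2 is the methodOop itself
//   } else {
//     klassOop k = recv->klass();           // implicit null check
//     method = k->vtable()[f2];             // f2 is the vtable index
//   }
//   jump_from_interpreted(method);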
2961 2961
2962 2962
2963 2963 void TemplateTable::invokevirtual(int byte_no) {
2964 2964 transition(vtos, vtos);
2965 2965 assert(byte_no == f2_byte, "use this argument");
2966 2966 prepare_invoke(rbx, noreg, byte_no);
2967 2967
2968 2968 // rbx,: index
2969 2969 // rcx: receiver
2970 2970 // rdx: flags
2971 2971
2972 2972 invokevirtual_helper(rbx, rcx, rdx);
2973 2973 }
2974 2974
2975 2975
2976 2976 void TemplateTable::invokespecial(int byte_no) {
2977 2977 transition(vtos, vtos);
2978 2978 assert(byte_no == f1_byte, "use this argument");
2979 2979 prepare_invoke(rbx, noreg, byte_no);
2980 2980 // do the call
2981 2981 __ verify_oop(rbx);
2982 2982 __ profile_call(rax);
2983 2983 __ jump_from_interpreted(rbx, rax);
2984 2984 }
2985 2985
2986 2986
2987 2987 void TemplateTable::invokestatic(int byte_no) {
2988 2988 transition(vtos, vtos);
2989 2989 assert(byte_no == f1_byte, "use this argument");
2990 2990 prepare_invoke(rbx, noreg, byte_no);
2991 2991 // do the call
2992 2992 __ verify_oop(rbx);
2993 2993 __ profile_call(rax);
2994 2994 __ jump_from_interpreted(rbx, rax);
2995 2995 }
2996 2996
2997 2997
2998 2998 void TemplateTable::fast_invokevfinal(int byte_no) {
2999 2999 transition(vtos, vtos);
3000 3000 assert(byte_no == f2_byte, "use this argument");
3001 3001 __ stop("fast_invokevfinal not used on x86");
3002 3002 }
3003 3003
3004 3004
3005 3005 void TemplateTable::invokeinterface(int byte_no) {
3006 3006 transition(vtos, vtos);
3007 3007 assert(byte_no == f1_byte, "use this argument");
3008 3008 prepare_invoke(rax, rbx, byte_no);
3009 3009
3010 3010 // rax,: Interface
3011 3011 // rbx,: index
3012 3012 // rcx: receiver
3013 3013 // rdx: flags
3014 3014
3015 3015 // Special case of invokeinterface called for virtual method of
3016 3016 // java.lang.Object. See cpCacheOop.cpp for details.
3017 3017 // This code isn't produced by javac, but could be produced by
3018 3018 // another compliant Java compiler.
3019 3019 Label notMethod;
3020 3020 __ movl(rdi, rdx);
3021 3021 __ andl(rdi, (1 << ConstantPoolCacheEntry::methodInterface));
3022 3022 __ jcc(Assembler::zero, notMethod);
3023 3023
3024 3024 invokevirtual_helper(rbx, rcx, rdx);
3025 3025 __ bind(notMethod);
3026 3026
3027 3027 // Get receiver klass into rdx - also a null check
3028 3028 __ restore_locals(); // restore rdi
3029 3029 __ movptr(rdx, Address(rcx, oopDesc::klass_offset_in_bytes()));
3030 3030 __ verify_oop(rdx);
3031 3031
3032 3032 // profile this call
3033 3033 __ profile_virtual_call(rdx, rsi, rdi);
3034 3034
3035 3035 Label no_such_interface, no_such_method;
3036 3036
3037 3037 __ lookup_interface_method(// inputs: rec. class, interface, itable index
3038 3038 rdx, rax, rbx,
3039 3039 // outputs: method, scan temp. reg
3040 3040 rbx, rsi,
3041 3041 no_such_interface);
3042 3042
3043 3043 // rbx,: methodOop to call
3044 3044 // rcx: receiver
3045 3045 // Check for abstract method error
3046 3046 // Note: This should be done more efficiently via a throw_abstract_method_error
3047 3047 // interpreter entry point and a conditional jump to it in case of a null
3048 3048 // method.
3049 3049 __ testptr(rbx, rbx);
3050 3050 __ jcc(Assembler::zero, no_such_method);
3051 3051
3052 3052 // do the call
3053 3053 // rcx: receiver
3054 3054 // rbx: methodOop
3055 3055 __ jump_from_interpreted(rbx, rdx);
3056 3056 __ should_not_reach_here();
3057 3057
3058 3058 // exception handling code follows...
3059 3059 // note: must restore interpreter registers to canonical
3060 3060 // state for exception handling to work correctly!
3061 3061
3062 3062 __ bind(no_such_method);
3063 3063 // throw exception
3064 3064 __ pop(rbx); // pop return address (pushed by prepare_invoke)
3065 3065 __ restore_bcp(); // rsi must be correct for exception handler (was destroyed)
3066 3066 __ restore_locals(); // make sure locals pointer is correct as well (was destroyed)
3067 3067 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_AbstractMethodError));
3068 3068 // the call_VM checks for exception, so we should never return here.
3069 3069 __ should_not_reach_here();
3070 3070
3071 3071 __ bind(no_such_interface);
3072 3072 // throw exception
3073 3073 __ pop(rbx); // pop return address (pushed by prepare_invoke)
3074 3074 __ restore_bcp(); // rsi must be correct for exception handler (was destroyed)
3075 3075 __ restore_locals(); // make sure locals pointer is correct as well (was destroyed)
3076 3076 __ call_VM(noreg, CAST_FROM_FN_PTR(address,
3077 3077 InterpreterRuntime::throw_IncompatibleClassChangeError));
3078 3078 // the call_VM checks for exception, so we should never return here.
3079 3079 __ should_not_reach_here();
3080 3080 }
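// For exposition: lookup_interface_method scans the receiver's itable for
// the resolved interface klass; informally (a sketch, not the actual
// macro-assembler implementation):
//
//   for (each itableOffsetEntry e in receiver_klass's itable) {
//     if (e.interface() == NULL)      goto no_such_interface;
//     if (e.interface() == rax_iface) { method = method_at(e.offset(), index); break; }
//   }
//   if (method == NULL) goto no_such_method;  // abstract method in the itable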
3081 3081
3082 3082 void TemplateTable::invokedynamic(int byte_no) {
3083 3083 transition(vtos, vtos);
3084 3084
3085 3085 if (!EnableInvokeDynamic) {
3086 3086 // We should not encounter this bytecode if !EnableInvokeDynamic.
3087 3087 // The verifier will stop it. However, if we get past the verifier,
3088 3088 // this will stop the thread in a reasonable way, without crashing the JVM.
3089 3089 __ call_VM(noreg, CAST_FROM_FN_PTR(address,
3090 3090 InterpreterRuntime::throw_IncompatibleClassChangeError));
3091 3091 // the call_VM checks for exception, so we should never return here.
3092 3092 __ should_not_reach_here();
3093 3093 return;
3094 3094 }
3095 3095
3096 3096 assert(byte_no == f1_oop, "use this argument");
3097 3097 prepare_invoke(rax, rbx, byte_no);
3098 3098
3099 3099 // rax: CallSite object (f1)
3100 3100 // rbx: unused (f2)
3101 3101 // rcx: receiver address
3102 3102 // rdx: flags (unused)
3103 3103
3104 3104 Register rax_callsite = rax;
3105 3105 Register rcx_method_handle = rcx;
3106 3106
3107 3107 if (ProfileInterpreter) {
3108 3108 // %%% should make a type profile for any invokedynamic that takes a ref argument
3109 3109 // profile this call
3110 3110 __ profile_call(rsi);
3111 3111 }
3112 3112
3113 3113 __ movptr(rcx_method_handle, Address(rax_callsite, __ delayed_value(java_lang_invoke_CallSite::target_offset_in_bytes, rcx)));
3114 3114 __ null_check(rcx_method_handle);
3115 3115 __ prepare_to_jump_from_interpreted();
3116 3116 __ jump_to_method_handle_entry(rcx_method_handle, rdx);
3117 3117 }
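// For exposition, the fast path above behaves roughly like this sketch
// (informal; the real dispatch goes through the interpreter's method
// handle entry, and delayed_value() is used because the CallSite.target
// field offset is only computed once java.lang.invoke is initialized):
//
//   oop mh = java_lang_invoke_CallSite::target(callsite);  // load CallSite.target
//   null_check(mh);                                        // NPE if unbound
//   jump_to_method_handle_entry(mh);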
3118 3118
3119 3119 //----------------------------------------------------------------------------------------------------
3120 3120 // Allocation
3121 3121
3122 3122 void TemplateTable::_new() {
3123 3123 transition(vtos, atos);
3124 3124 __ get_unsigned_2_byte_index_at_bcp(rdx, 1);
3125 3125 Label slow_case;
3126 3126 Label slow_case_no_pop;
3127 3127 Label done;
3128 3128 Label initialize_header;
3129 3129 Label initialize_object; // including clearing the fields
3130 3130 Label allocate_shared;
3131 3131
3132 3132 __ get_cpool_and_tags(rcx, rax);
3133 3133
3134 3134 // Make sure the class we're about to instantiate has been resolved.
3135 3135 // This is done before loading the instanceKlass to be consistent with the
3136 3136 // order in which the constant pool is updated (see constantPoolOopDesc::klass_at_put).
3137 3137 const int tags_offset = typeArrayOopDesc::header_size(T_BYTE) * wordSize;
3138 3138 __ cmpb(Address(rax, rdx, Address::times_1, tags_offset), JVM_CONSTANT_Class);
3139 3139 __ jcc(Assembler::notEqual, slow_case_no_pop);
3140 3140
3141 3141 // get instanceKlass
3142 3142 __ movptr(rcx, Address(rcx, rdx, Address::times_ptr, sizeof(constantPoolOopDesc)));
3143 3143 __ push(rcx); // save the contents of the klass register for initializing the header
3144 3144
3145 3145 // make sure the klass is initialized and has no finalizer;
3146 3146 // first, check that it is fully initialized
3147 3147 __ cmpl(Address(rcx, instanceKlass::init_state_offset_in_bytes() + sizeof(oopDesc)), instanceKlass::fully_initialized);
3148 3148 __ jcc(Assembler::notEqual, slow_case);
3149 3149
3150 3150 // get instance_size in instanceKlass (scaled to a count of bytes)
3151 3151 __ movl(rdx, Address(rcx, Klass::layout_helper_offset_in_bytes() + sizeof(oopDesc)));
3152 3152 // test to see if it has a finalizer or is malformed in some way
3153 3153 __ testl(rdx, Klass::_lh_instance_slow_path_bit);
3154 3154 __ jcc(Assembler::notZero, slow_case);
3155 3155
3156 3156 //
3157 3157 // Allocate the instance
3158 3158 // 1) Try to allocate in the TLAB
3159 3159 // 2) if fail and the object is large allocate in the shared Eden
3160 3160 // 3) if the above fails (or is not applicable), go to a slow case
3161 3161 // (creates a new TLAB, etc.)
3162 3162
3163 3163 const bool allow_shared_alloc =
3164 3164 Universe::heap()->supports_inline_contig_alloc() && !CMSIncrementalMode;
3165 3165
3166 3166 const Register thread = rcx;
3167 3167 if (UseTLAB || allow_shared_alloc) {
3168 3168 __ get_thread(thread);
3169 3169 }
3170 3170
3171 3171 if (UseTLAB) {
3172 3172 __ movptr(rax, Address(thread, in_bytes(JavaThread::tlab_top_offset())));
3173 3173 __ lea(rbx, Address(rax, rdx, Address::times_1));
3174 3174 __ cmpptr(rbx, Address(thread, in_bytes(JavaThread::tlab_end_offset())));
3175 3175 __ jcc(Assembler::above, allow_shared_alloc ? allocate_shared : slow_case);
3176 3176 __ movptr(Address(thread, in_bytes(JavaThread::tlab_top_offset())), rbx);
3177 3177 if (ZeroTLAB) {
3178 3178 // the fields have been already cleared
3179 3179 __ jmp(initialize_header);
3180 3180 } else {
3181 3181 // initialize both the header and fields
3182 3182 __ jmp(initialize_object);
3183 3183 }
3184 3184 }
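// For exposition, the TLAB fast path above is plain bump-pointer
// allocation; roughly (a sketch with informal accessor names):
//
//   HeapWord* obj = thread->tlab_top();
//   HeapWord* end = (HeapWord*)((address)obj + size_in_bytes);  // rdx
//   if (end > thread->tlab_end()) goto allocate_shared_or_slow_case;
//   thread->set_tlab_top(end);  // thread-local, so no atomics needed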
3185 3185
3186 3186 // Allocation in the shared Eden, if allowed.
3187 3187 //
3188 3188 // rdx: instance size in bytes
3189 3189 if (allow_shared_alloc) {
3190 3190 __ bind(allocate_shared);
3191 3191
3192 3192 ExternalAddress heap_top((address)Universe::heap()->top_addr());
3193 3193
3194 3194 Label retry;
3195 3195 __ bind(retry);
3196 3196 __ movptr(rax, heap_top);
3197 3197 __ lea(rbx, Address(rax, rdx, Address::times_1));
3198 3198 __ cmpptr(rbx, ExternalAddress((address)Universe::heap()->end_addr()));
3199 3199 __ jcc(Assembler::above, slow_case);
3200 3200
3201 3201 // Compare rax with the current top addr, and if still equal, store the new
3202 3202 // top addr from rbx at the address of the top addr pointer. Sets ZF if it was
3203 3203 // equal, and clears it otherwise. Use lock prefix for atomicity on MPs.
3204 3204 //
3205 3205 // rax: object begin
3206 3206 // rbx: object end
3207 3207 // rdx: instance size in bytes
3208 3208 __ locked_cmpxchgptr(rbx, heap_top);
3209 3209
3210 3210 // if someone beat us on the allocation, try again, otherwise continue
3211 3211 __ jcc(Assembler::notEqual, retry);
3212 3212
3213 3213 __ incr_allocated_bytes(thread, rdx, 0);
3214 3214 }
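// For exposition, the retry loop above is a lock-free bump of the shared
// eden top pointer; in C-like pseudocode (a sketch):
//
//   do {
//     old_top = *heap_top;                    // rax
//     new_top = old_top + size_in_bytes;      // rbx
//     if (new_top > *heap_end) goto slow_case;
//   } while (cmpxchg(heap_top, old_top, new_top) != old_top);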
3215 3215
3216 3216 if (UseTLAB || Universe::heap()->supports_inline_contig_alloc()) {
3217 3217 // The object fields are initialized before the header. If the field size
3218 3218 // is zero, go directly to the header initialization.
3219 3219 __ bind(initialize_object);
3220 3220 __ decrement(rdx, sizeof(oopDesc));
3221 3221 __ jcc(Assembler::zero, initialize_header);
3222 3222
3223 3223 // Clear rcx for use as the zero source and convert rdx to a count of
3224 3224 // 8-byte units; the shift sets carry if the size wasn't a multiple of 8.
3225 3225 __ xorl(rcx, rcx); // use zero reg to clear memory (shorter code)
3226 3226 __ shrl(rdx, LogBytesPerLong); // divide by 2*oopSize and set carry flag if odd
3227 3227
3228 3228 // rdx must have been a multiple of 8
3229 3229 #ifdef ASSERT
3230 3230 // verify that rdx was indeed a multiple of 8
3231 3231 Label L;
3232 3232 // Ignore partial flag stall after shrl() since it is debug VM
3233 3233 __ jccb(Assembler::carryClear, L);
3234 3234 __ stop("object size is not multiple of 2 - adjust this code");
3235 3235 __ bind(L);
3236 3236 // rdx must be > 0, no extra check needed here
3237 3237 #endif
3238 3238
3239 3239 // initialize remaining object fields: rdx was a multiple of 8
3240 3240 { Label loop;
3241 3241 __ bind(loop);
3242 3242 __ movptr(Address(rax, rdx, Address::times_8, sizeof(oopDesc) - 1*oopSize), rcx);
3243 3243 NOT_LP64(__ movptr(Address(rax, rdx, Address::times_8, sizeof(oopDesc) - 2*oopSize), rcx));
3244 3244 __ decrement(rdx);
3245 3245 __ jcc(Assembler::notZero, loop);
3246 3246 }
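// For exposition: with rdx holding the field size in 8-byte units, each
// iteration above clears one unit via two 4-byte stores (this file is
// 32-bit only); roughly (a sketch):
//
//   jint* p = (jint*)obj;                     // rax
//   int hdr_words = sizeof(oopDesc) / sizeof(jint);
//   while (rdx != 0) {
//     p[hdr_words + 2*rdx - 1] = 0;
//     p[hdr_words + 2*rdx - 2] = 0;
//     rdx--;
//   }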
3247 3247
3248 3248 // initialize object header only.
3249 3249 __ bind(initialize_header);
3250 3250 if (UseBiasedLocking) {
3251 3251 __ pop(rcx); // get saved klass back in the register.
3252 3252 __ movptr(rbx, Address(rcx, Klass::prototype_header_offset_in_bytes() + klassOopDesc::klass_part_offset_in_bytes()));
3253 3253 __ movptr(Address(rax, oopDesc::mark_offset_in_bytes ()), rbx);
3254 3254 } else {
3255 3255 __ movptr(Address(rax, oopDesc::mark_offset_in_bytes ()),
3256 3256 (int32_t)markOopDesc::prototype()); // header
3257 3257 __ pop(rcx); // get saved klass back in the register.
3258 3258 }
3259 3259 __ movptr(Address(rax, oopDesc::klass_offset_in_bytes()), rcx); // klass
3260 3260
3261 3261 {
3262 3262 SkipIfEqual skip_if(_masm, &DTraceAllocProbes, 0);
3263 3263 // Trigger dtrace event for fastpath
3264 3264 __ push(atos);
3265 3265 __ call_VM_leaf(
3266 3266 CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_object_alloc), rax);
3267 3267 __ pop(atos);
3268 3268 }
3269 3269
3270 3270 __ jmp(done);
3271 3271 }
3272 3272
3273 3273 // slow case
3274 3274 __ bind(slow_case);
3275 3275 __ pop(rcx); // restore stack pointer to what it was when we came in.
3276 3276 __ bind(slow_case_no_pop);
3277 3277 __ get_constant_pool(rax);
3278 3278 __ get_unsigned_2_byte_index_at_bcp(rdx, 1);
3279 3279 call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::_new), rax, rdx);
3280 3280
3281 3281 // continue
3282 3282 __ bind(done);
3283 3283 }
3284 3284
3285 3285
3286 3286 void TemplateTable::newarray() {
3287 3287 transition(itos, atos);
3288 3288 __ push_i(rax); // make sure everything is on the stack
3289 3289 __ load_unsigned_byte(rdx, at_bcp(1));
3290 3290 call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::newarray), rdx, rax);
3291 3291 __ pop_i(rdx); // discard size
3292 3292 }
3293 3293
3294 3294
3295 3295 void TemplateTable::anewarray() {
3296 3296 transition(itos, atos);
3297 3297 __ get_unsigned_2_byte_index_at_bcp(rdx, 1);
3298 3298 __ get_constant_pool(rcx);
3299 3299 call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::anewarray), rcx, rdx, rax);
3300 3300 }
3301 3301
3302 3302
3303 3303 void TemplateTable::arraylength() {
3304 3304 transition(atos, itos);
3305 3305 __ null_check(rax, arrayOopDesc::length_offset_in_bytes());
3306 3306 __ movl(rax, Address(rax, arrayOopDesc::length_offset_in_bytes()));
3307 3307 }
3308 3308
3309 3309
3310 3310 void TemplateTable::checkcast() {
3311 3311 transition(atos, atos);
3312 3312 Label done, is_null, ok_is_subtype, quicked, resolved;
3313 3313 __ testptr(rax, rax); // Object is in EAX
3314 3314 __ jcc(Assembler::zero, is_null);
3315 3315
3316 3316 // Get cpool & tags index
3317 3317 __ get_cpool_and_tags(rcx, rdx); // ECX=cpool, EDX=tags array
3318 3318 __ get_unsigned_2_byte_index_at_bcp(rbx, 1); // EBX=index
3319 3319 // See if the bytecode has already been quickened
3320 3320 __ cmpb(Address(rdx, rbx, Address::times_1, typeArrayOopDesc::header_size(T_BYTE) * wordSize), JVM_CONSTANT_Class);
3321 3321 __ jcc(Assembler::equal, quicked);
3322 3322
3323 3323 __ push(atos);
3324 3324 call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::quicken_io_cc) );
3325 3325 __ pop_ptr(rdx);
3326 3326 __ jmpb(resolved);
3327 3327
3328 3328 // Get superklass in EAX and subklass in EBX
3329 3329 __ bind(quicked);
3330 3330 __ mov(rdx, rax); // Save object in EDX; EAX needed for subtype check
3331 3331 __ movptr(rax, Address(rcx, rbx, Address::times_ptr, sizeof(constantPoolOopDesc)));
3332 3332
3333 3333 __ bind(resolved);
3334 3334 __ movptr(rbx, Address(rdx, oopDesc::klass_offset_in_bytes()));
3335 3335
3336 3336 // Generate subtype check. Blows ECX. Resets EDI. Object in EDX.
3337 3337 // Superklass in EAX. Subklass in EBX.
3338 3338 __ gen_subtype_check( rbx, ok_is_subtype );
3339 3339
3340 3340 // Come here on failure
3341 3341 __ push(rdx);
3342 3342 // object is at TOS
3343 3343 __ jump(ExternalAddress(Interpreter::_throw_ClassCastException_entry));
3344 3344
3345 3345 // Come here on success
3346 3346 __ bind(ok_is_subtype);
3347 3347 __ mov(rax,rdx); // Restore object from EDX
3348 3348
3349 3349 // Collect counts on whether this check-cast sees NULLs a lot or not.
3350 3350 if (ProfileInterpreter) {
3351 3351 __ jmp(done);
3352 3352 __ bind(is_null);
3353 3353 __ profile_null_seen(rcx);
3354 3354 } else {
3355 3355 __ bind(is_null); // same as 'done'
3356 3356 }
3357 3357 __ bind(done);
3358 3358 }
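// For exposition, the overall checkcast logic above, as pseudocode (a
// sketch; helper names informal):
//
//   if (obj == NULL) return obj;                    // null always passes (profiled)
//   klassOop super = resolved_klass_at(cp, index);  // fast if already quickened,
//                                                   // else quicken_io_cc resolves it
//   if (!obj->klass()->is_subtype_of(super)) throw ClassCastException;
//   return obj;                                     // object left on TOS unchanged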
3359 3359
3360 3360
3361 3361 void TemplateTable::instanceof() {
3362 3362 transition(atos, itos);
3363 3363 Label done, is_null, ok_is_subtype, quicked, resolved;
3364 3364 __ testptr(rax, rax);
3365 3365 __ jcc(Assembler::zero, is_null);
3366 3366
3367 3367 // Get cpool & tags index
3368 3368 __ get_cpool_and_tags(rcx, rdx); // ECX=cpool, EDX=tags array
3369 3369 __ get_unsigned_2_byte_index_at_bcp(rbx, 1); // EBX=index
3370 3370 // See if the bytecode has already been quickened
3371 3371 __ cmpb(Address(rdx, rbx, Address::times_1, typeArrayOopDesc::header_size(T_BYTE) * wordSize), JVM_CONSTANT_Class);
3372 3372 __ jcc(Assembler::equal, quicked);
3373 3373
3374 3374 __ push(atos);
3375 3375 call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::quicken_io_cc) );
3376 3376 __ pop_ptr(rdx);
3377 3377 __ movptr(rdx, Address(rdx, oopDesc::klass_offset_in_bytes()));
3378 3378 __ jmp(resolved);
3379 3379
3380 3380 // Get superklass in EAX and subklass in EDX
3381 3381 __ bind(quicked);
3382 3382 __ movptr(rdx, Address(rax, oopDesc::klass_offset_in_bytes()));
3383 3383 __ movptr(rax, Address(rcx, rbx, Address::times_ptr, sizeof(constantPoolOopDesc)));
3384 3384
3385 3385 __ bind(resolved);
3386 3386
3387 3387 // Generate subtype check. Blows ECX. Resets EDI.
3388 3388 // Superklass in EAX. Subklass in EDX.
3389 3389 __ gen_subtype_check( rdx, ok_is_subtype );
3390 3390
3391 3391 // Come here on failure
3392 3392 __ xorl(rax,rax);
3393 3393 __ jmpb(done);
3394 3394 // Come here on success
3395 3395 __ bind(ok_is_subtype);
3396 3396 __ movl(rax, 1);
3397 3397
3398 3398 // Collect counts on whether this test sees NULLs a lot or not.
3399 3399 if (ProfileInterpreter) {
3400 3400 __ jmp(done);
3401 3401 __ bind(is_null);
3402 3402 __ profile_null_seen(rcx);
3403 3403 } else {
3404 3404 __ bind(is_null); // same as 'done'
3405 3405 }
3406 3406 __ bind(done);
3407 3407 // rax = 0: obj == NULL or obj is not an instance of the specified klass
3408 3408 // rax = 1: obj != NULL and obj is an instance of the specified klass
3409 3409 }
3410 3410
3411 3411
3412 3412 //----------------------------------------------------------------------------------------------------
3413 3413 // Breakpoints
3414 3414 void TemplateTable::_breakpoint() {
3415 3415
3416 3416 // Note: We get here even if we are single stepping.
3417 3417 // jbug insists on setting breakpoints at every bytecode
3418 3418 // even if we are in single step mode.
3419 3419
3420 3420 transition(vtos, vtos);
3421 3421
3422 3422 // get the unpatched byte code
3423 3423 __ get_method(rcx);
3424 3424 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::get_original_bytecode_at), rcx, rsi);
3425 3425 __ mov(rbx, rax);
3426 3426
3427 3427 // post the breakpoint event
3428 3428 __ get_method(rcx);
3429 3429 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::_breakpoint), rcx, rsi);
3430 3430
3431 3431 // complete the execution of original bytecode
3432 3432 __ dispatch_only_normal(vtos);
3433 3433 }
3434 3434
3435 3435
3436 3436 //----------------------------------------------------------------------------------------------------
3437 3437 // Exceptions
3438 3438
3439 3439 void TemplateTable::athrow() {
3440 3440 transition(atos, vtos);
3441 3441 __ null_check(rax);
3442 3442 __ jump(ExternalAddress(Interpreter::throw_exception_entry()));
3443 3443 }
3444 3444
3445 3445
3446 3446 //----------------------------------------------------------------------------------------------------
3447 3447 // Synchronization
3448 3448 //
3449 3449 // Note: monitorenter & exit are symmetric routines, which is reflected
3450 3450 // in the assembly code structure as well
3451 3451 //
3452 3452 // Stack layout:
3453 3453 //
3454 3454 // [expressions ] <--- rsp = expression stack top
3455 3455 // ..
3456 3456 // [expressions ]
3457 3457 // [monitor entry] <--- monitor block top = expression stack bot
3458 3458 // ..
3459 3459 // [monitor entry]
3460 3460 // [frame data ] <--- monitor block bot
3461 3461 // ...
3462 3462 // [saved rbp ] <--- rbp
3463 3463
3464 3464
3465 3465 void TemplateTable::monitorenter() {
3466 3466 transition(atos, vtos);
3467 3467
3468 3468 // check for NULL object
3469 3469 __ null_check(rax);
3470 3470
3471 3471 const Address monitor_block_top(rbp, frame::interpreter_frame_monitor_block_top_offset * wordSize);
3472 3472 const Address monitor_block_bot(rbp, frame::interpreter_frame_initial_sp_offset * wordSize);
3473 3473 const int entry_size = ( frame::interpreter_frame_monitor_size() * wordSize);
3474 3474 Label allocated;
3475 3475
3476 3476 // initialize entry pointer
3477 3477 __ xorl(rdx, rdx); // points to free slot or NULL
3478 3478
3479 3479 // find a free slot in the monitor block (result in rdx)
3480 3480 { Label entry, loop, exit;
3481 3481 __ movptr(rcx, monitor_block_top); // points to current entry, starting with top-most entry
3482 3482 __ lea(rbx, monitor_block_bot); // points to word before bottom of monitor block
3483 3483 __ jmpb(entry);
3484 3484
3485 3485 __ bind(loop);
3486 3486 __ cmpptr(Address(rcx, BasicObjectLock::obj_offset_in_bytes()), (int32_t)NULL_WORD); // check if current entry is used
3487 3487
3488 3488 // TODO - need new func here - kbt
3489 3489 if (VM_Version::supports_cmov()) {
3490 3490 __ cmov(Assembler::equal, rdx, rcx); // if not used then remember entry in rdx
3491 3491 } else {
3492 3492 Label L;
3493 3493 __ jccb(Assembler::notEqual, L);
3494 3494 __ mov(rdx, rcx); // if not used then remember entry in rdx
3495 3495 __ bind(L);
3496 3496 }
3497 3497 __ cmpptr(rax, Address(rcx, BasicObjectLock::obj_offset_in_bytes())); // check if current entry is for same object
3498 3498 __ jccb(Assembler::equal, exit); // if same object then stop searching
3499 3499 __ addptr(rcx, entry_size); // otherwise advance to next entry
3500 3500 __ bind(entry);
3501 3501 __ cmpptr(rcx, rbx); // check if bottom reached
3502 3502 __ jcc(Assembler::notEqual, loop); // if not at bottom then check this entry
3503 3503 __ bind(exit);
3504 3504 }
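// For exposition, the search above walks the monitor block from top to
// bottom; informally (a sketch):
//
//   BasicObjectLock* free = NULL;                   // rdx
//   for (e = top; e != bot; e = (BasicObjectLock*)((address)e + entry_size)) {
//     if (e->obj() == NULL)   free = e;             // remember a free slot
//     if (e->obj() == lockee) break;                // stop at an entry for this object
//   }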
3505 3505
3506 3506 __ testptr(rdx, rdx); // check if a slot has been found
3507 3507 __ jccb(Assembler::notZero, allocated); // if found, continue with that one
3508 3508
3509 3509 // allocate one if there's no free slot
3510 3510 { Label entry, loop;
3511 3511 // 1. compute new pointers // rsp: old expression stack top
3512 3512 __ movptr(rdx, monitor_block_bot); // rdx: old expression stack bottom
3513 3513 __ subptr(rsp, entry_size); // move expression stack top
3514 3514 __ subptr(rdx, entry_size); // move expression stack bottom
3515 3515 __ mov(rcx, rsp); // set start value for copy loop
3516 3516 __ movptr(monitor_block_bot, rdx); // set new monitor block bottom
3517 3517 __ jmp(entry);
3518 3518 // 2. move expression stack contents
3519 3519 __ bind(loop);
3520 3520 __ movptr(rbx, Address(rcx, entry_size)); // load expression stack word from old location
3521 3521 __ movptr(Address(rcx, 0), rbx); // and store it at new location
3522 3522 __ addptr(rcx, wordSize); // advance to next word
3523 3523 __ bind(entry);
3524 3524 __ cmpptr(rcx, rdx); // check if bottom reached
3525 3525 __ jcc(Assembler::notEqual, loop); // if not at bottom then copy next word
3526 3526 }
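// For exposition: when no free slot exists, the expression stack is
// shifted down one entry to make room; roughly (a sketch):
//
//   rsp     -= entry_size;                          // new expression stack top
//   new_bot  = old_bot - entry_size;                // new monitor block bottom
//   for (p = rsp; p != new_bot; p += wordSize)
//     *(intptr_t*)p = *(intptr_t*)((address)p + entry_size);  // copy word down
//   // the vacated entry at new_bot becomes the new monitor slot (rdx)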
3527 3527
3528 3528 // call run-time routine
3529 3529 // rdx: points to monitor entry
3530 3530 __ bind(allocated);
3531 3531
3532 3532 // Increment bcp to point to the next bytecode, so exception handling for async. exceptions works correctly.
3533 3533 // The object has already been popped from the stack, so the expression stack looks correct.
3534 3534 __ increment(rsi);
3535 3535
3536 3536 __ movptr(Address(rdx, BasicObjectLock::obj_offset_in_bytes()), rax); // store object
3537 3537 __ lock_object(rdx);
3538 3538
3539 3539 // check to make sure this monitor doesn't cause stack overflow after locking
3540 3540 __ save_bcp(); // in case of exception
3541 3541 __ generate_stack_overflow_check(0);
3542 3542
3543 3543 // The bcp has already been incremented. Just need to dispatch to next instruction.
3544 3544 __ dispatch_next(vtos);
3545 3545 }
3546 3546
3547 3547
3548 3548 void TemplateTable::monitorexit() {
3549 3549 transition(atos, vtos);
3550 3550
3551 3551 // check for NULL object
3552 3552 __ null_check(rax);
3553 3553
3554 3554 const Address monitor_block_top(rbp, frame::interpreter_frame_monitor_block_top_offset * wordSize);
3555 3555 const Address monitor_block_bot(rbp, frame::interpreter_frame_initial_sp_offset * wordSize);
3556 3556 const int entry_size = ( frame::interpreter_frame_monitor_size() * wordSize);
3557 3557 Label found;
3558 3558
3559 3559 // find matching slot
3560 3560 { Label entry, loop;
3561 3561 __ movptr(rdx, monitor_block_top); // points to current entry, starting with top-most entry
3562 3562 __ lea(rbx, monitor_block_bot); // points to word before bottom of monitor block
3563 3563 __ jmpb(entry);
3564 3564
3565 3565 __ bind(loop);
3566 3566 __ cmpptr(rax, Address(rdx, BasicObjectLock::obj_offset_in_bytes())); // check if current entry is for same object
3567 3567 __ jcc(Assembler::equal, found); // if same object then stop searching
3568 3568 __ addptr(rdx, entry_size); // otherwise advance to next entry
3569 3569 __ bind(entry);
3570 3570 __ cmpptr(rdx, rbx); // check if bottom reached
3571 3571 __ jcc(Assembler::notEqual, loop); // if not at bottom then check this entry
3572 3572 }
3573 3573
3574 3574 // Error handling: the object was not found in the monitor block, so unlocking was not block-structured
3575 3575 Label end;
3576 3576 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_illegal_monitor_state_exception));
3577 3577 __ should_not_reach_here();
3578 3578
3579 3579 // call run-time routine
3580 3580 // rdx: points to monitor entry
3581 3581 __ bind(found);
3582 3582 __ push_ptr(rax); // make sure object is on stack (contract with oopMaps)
3583 3583 __ unlock_object(rdx);
3584 3584 __ pop_ptr(rax); // discard object
3585 3585 __ bind(end);
3586 3586 }
3587 3587
3588 3588
3589 3589 //----------------------------------------------------------------------------------------------------
3590 3590 // Wide instructions
3591 3591
3592 3592 void TemplateTable::wide() {
3593 3593 transition(vtos, vtos);
3594 3594 __ load_unsigned_byte(rbx, at_bcp(1));
3595 3595 ExternalAddress wtable((address)Interpreter::_wentry_point);
3596 3596 __ jump(ArrayAddress(wtable, Address(noreg, rbx, Address::times_ptr)));
3597 3597 // Note: the rsi increment step is part of the individual wide bytecode implementations
3598 3598 }
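// For exposition, the dispatch above is, in effect (a sketch):
//
//   goto Interpreter::_wentry_point[*(bcp + 1)];
//
// i.e. the byte following the 'wide' prefix selects a wide-specific entry
// point, and that handler advances rsi (the bcp) itself.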
3599 3599
3600 3600
3601 3601 //----------------------------------------------------------------------------------------------------
3602 3602 // Multi arrays
3603 3603
3604 3604 void TemplateTable::multianewarray() {
3605 3605 transition(vtos, atos);
3606 3606 __ load_unsigned_byte(rax, at_bcp(3)); // get number of dimensions
3607 3607 // last dim is on top of stack; we want address of first one:
3608 3608 // first_addr = last_addr + (ndims - 1) * stackElementSize
3609 3609 // (the lea below scales by ndims and subtracts one wordSize, which is equivalent since stackElementSize == wordSize here)
3610 3610 __ lea( rax, Address(rsp, rax, Interpreter::stackElementScale(), -wordSize));
3611 3611 call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::multianewarray), rax); // pass in rax,
3612 3612 __ load_unsigned_byte(rbx, at_bcp(3));
3613 3613 __ lea(rsp, Address(rsp, rbx, Interpreter::stackElementScale())); // get rid of counts
3614 3614 }
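// For exposition, a worked example (32-bit, ndims == 3): the dimension
// ints sit at rsp (last dim), rsp+4, and rsp+8 (first dim); the lea
// computes rax = rsp + 3*4 - 4 = rsp + 8, the address of the first
// dimension, and after the call rsp is bumped by 3 words to drop the
// counts.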
3615 3615
3616 3616 #endif /* !CC_INTERP */
3211 lines elided