/*
 * Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/interpreterRuntime.hpp"
#include "interpreter/templateTable.hpp"
#include "memory/universe.inline.hpp"
#include "oops/methodDataOop.hpp"
#include "oops/objArrayKlass.hpp"
#include "oops/oop.inline.hpp"
#include "prims/methodHandles.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/synchronizer.hpp"

#ifndef CC_INTERP

#define __ _masm->

// Platform-dependent initialization

void TemplateTable::pd_initialize() {
  // No amd64 specific initialization
}

// Address computation: local variables

static inline Address iaddress(int n) {
  return Address(r14, Interpreter::local_offset_in_bytes(n));
}

static inline Address laddress(int n) {
  return iaddress(n + 1);
}

static inline Address faddress(int n) {
  return iaddress(n);
}

static inline Address daddress(int n) {
  return laddress(n);
}

static inline Address aaddress(int n) {
  return iaddress(n);
}

static inline Address iaddress(Register r) {
  return Address(r14, r, Address::times_8);
}

static inline Address laddress(Register r) {
  return Address(r14, r, Address::times_8, Interpreter::local_offset_in_bytes(1));
}

static inline Address faddress(Register r) {
  return iaddress(r);
}

static inline Address daddress(Register r) {
  return laddress(r);
}

static inline Address aaddress(Register r) {
  return iaddress(r);
}

static inline Address at_rsp() {
  return Address(rsp, 0);
}

// At top of Java expression stack, which may be different from rsp().  It
// isn't for category 1 objects.
static inline Address at_tos   () {
  return Address(rsp,  Interpreter::expr_offset_in_bytes(0));
}

static inline Address at_tos_p1() {
  return Address(rsp,  Interpreter::expr_offset_in_bytes(1));
}

static inline Address at_tos_p2() {
  return Address(rsp,  Interpreter::expr_offset_in_bytes(2));
}

static inline Address at_tos_p3() {
  return Address(rsp,  Interpreter::expr_offset_in_bytes(3));
}

// Condition conversion
static Assembler::Condition j_not(TemplateTable::Condition cc) {
  switch (cc) {
  case TemplateTable::equal        : return Assembler::notEqual;
  case TemplateTable::not_equal    : return Assembler::equal;
  case TemplateTable::less         : return Assembler::greaterEqual;
  case TemplateTable::less_equal   : return Assembler::greater;
  case TemplateTable::greater      : return Assembler::lessEqual;
  case TemplateTable::greater_equal: return Assembler::less;
  }
  ShouldNotReachHere();
  return Assembler::zero;
}
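
// Note: the comparison templates below emit the not-taken path inline, so
// they branch over it on the *negated* condition.  For example, if_icmp
// uses the pattern
//   __ jcc(j_not(cc), not_taken);
//   branch(false, false);          // taken path, falls through here
//   __ bind(not_taken);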

// Miscellaneous helper routines
// Store an oop (or NULL) at the address described by obj.
// If val == noreg this means store a NULL

static void do_oop_store(InterpreterMacroAssembler* _masm,
                         Address obj,
                         Register val,
                         BarrierSet::Name barrier,
                         bool precise) {
  assert(val == noreg || val == rax, "parameter is just for looks");
  switch (barrier) {
#ifndef SERIALGC
    case BarrierSet::G1SATBCT:
    case BarrierSet::G1SATBCTLogging:
      {
        // flatten object address if needed
        if (obj.index() == noreg && obj.disp() == 0) {
          if (obj.base() != rdx) {
            __ movq(rdx, obj.base());
          }
        } else {
          __ leaq(rdx, obj);
        }
        __ g1_write_barrier_pre(rdx /* obj */,
                                rbx /* pre_val */,
                                r15_thread /* thread */,
                                r8  /* tmp */,
                                val != noreg /* tosca_live */,
                                false /* expand_call */);
        if (val == noreg) {
          __ store_heap_oop_null(Address(rdx, 0));
        } else {
          // G1 barrier needs uncompressed oop for region cross check.
          Register new_val = val;
          if (UseCompressedOops) {
            new_val = rbx;
            __ movptr(new_val, val);
          }
          __ store_heap_oop(Address(rdx, 0), val);
          __ g1_write_barrier_post(rdx /* store_adr */,
                                   new_val /* new_val */,
                                   r15_thread /* thread */,
                                   r8 /* tmp */,
                                   rbx /* tmp2 */);
        }
      }
      break;
#endif // SERIALGC
    case BarrierSet::CardTableModRef:
    case BarrierSet::CardTableExtension:
      {
        if (val == noreg) {
          __ store_heap_oop_null(obj);
        } else {
          __ store_heap_oop(obj, val);
          // flatten object address if needed
          if (!precise || (obj.index() == noreg && obj.disp() == 0)) {
            __ store_check(obj.base());
          } else {
            __ leaq(rdx, obj);
            __ store_check(rdx);
          }
        }
      }
      break;
    case BarrierSet::ModRef:
    case BarrierSet::Other:
      if (val == noreg) {
        __ store_heap_oop_null(obj);
      } else {
        __ store_heap_oop(obj, val);
      }
      break;
    default:
      ShouldNotReachHere();

  }
}
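
// Typical uses of do_oop_store (both appear in aastore below): storing the
// value in rax with a precise card mark,
//   do_oop_store(_masm, Address(rdx, 0), rax, _bs->kind(), true);
// and storing a NULL, which needs no value register:
//   do_oop_store(_masm, element_address, noreg, _bs->kind(), true);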

Address TemplateTable::at_bcp(int offset) {
  assert(_desc->uses_bcp(), "inconsistent uses_bcp information");
  return Address(r13, offset);
}

void TemplateTable::patch_bytecode(Bytecodes::Code bc, Register bc_reg,
                                   Register temp_reg, bool load_bc_into_bc_reg/*=true*/,
                                   int byte_no) {
  if (!RewriteBytecodes)  return;
  Label L_patch_done;

  switch (bc) {
  case Bytecodes::_fast_aputfield:
  case Bytecodes::_fast_bputfield:
  case Bytecodes::_fast_cputfield:
  case Bytecodes::_fast_dputfield:
  case Bytecodes::_fast_fputfield:
  case Bytecodes::_fast_iputfield:
  case Bytecodes::_fast_lputfield:
  case Bytecodes::_fast_sputfield:
    {
      // We skip bytecode quickening for putfield instructions when
      // the put_code written to the constant pool cache is zero.
      // This is required so that every execution of this instruction
      // calls out to InterpreterRuntime::resolve_get_put to do
      // additional, required work.
      assert(byte_no == f1_byte || byte_no == f2_byte, "byte_no out of range");
      assert(load_bc_into_bc_reg, "we use bc_reg as temp");
      __ get_cache_and_index_and_bytecode_at_bcp(temp_reg, bc_reg, temp_reg, byte_no, 1);
      __ movl(bc_reg, bc);
      __ cmpl(temp_reg, (int) 0);
      __ jcc(Assembler::zero, L_patch_done);  // don't patch
    }
    break;
  default:
    assert(byte_no == -1, "sanity");
    // the pair bytecodes have already done the load.
    if (load_bc_into_bc_reg) {
      __ movl(bc_reg, bc);
    }
  }

  if (JvmtiExport::can_post_breakpoint()) {
    Label L_fast_patch;
    // if a breakpoint is present we can't rewrite the stream directly
    __ movzbl(temp_reg, at_bcp(0));
    __ cmpl(temp_reg, Bytecodes::_breakpoint);
    __ jcc(Assembler::notEqual, L_fast_patch);
    __ get_method(temp_reg);
    // Let breakpoint table handling rewrite to quicker bytecode
    __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::set_original_bytecode_at), temp_reg, r13, bc_reg);
#ifndef ASSERT
    __ jmpb(L_patch_done);
#else
    __ jmp(L_patch_done);
#endif
    __ bind(L_fast_patch);
  }

#ifdef ASSERT
  Label L_okay;
  __ load_unsigned_byte(temp_reg, at_bcp(0));
  __ cmpl(temp_reg, (int) Bytecodes::java_code(bc));
  __ jcc(Assembler::equal, L_okay);
  __ cmpl(temp_reg, bc_reg);
  __ jcc(Assembler::equal, L_okay);
  __ stop("patching the wrong bytecode");
  __ bind(L_okay);
#endif

  // patch bytecode
  __ movb(at_bcp(0), bc_reg);
  __ bind(L_patch_done);
}
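
// Illustration (a sketch of the quickening above): once a putfield has been
// resolved, the bytecode in the stream is overwritten in place, e.g.
//   putfield #4        becomes        fast_aputfield #4
// so subsequent executions dispatch straight to the fast template; the
// zero-put_code check above keeps the slow path until resolution has
// really happened.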

// Individual instructions

void TemplateTable::nop() {
  transition(vtos, vtos);
  // nothing to do
}

void TemplateTable::shouldnotreachhere() {
  transition(vtos, vtos);
  __ stop("shouldnotreachhere bytecode");
}

void TemplateTable::aconst_null() {
  transition(vtos, atos);
  __ xorl(rax, rax);
}

void TemplateTable::iconst(int value) {
  transition(vtos, itos);
  if (value == 0) {
    __ xorl(rax, rax);
  } else {
    __ movl(rax, value);
  }
}

void TemplateTable::lconst(int value) {
  transition(vtos, ltos);
  if (value == 0) {
    __ xorl(rax, rax);
  } else {
    __ movl(rax, value);
  }
}

void TemplateTable::fconst(int value) {
  transition(vtos, ftos);
  static float one = 1.0f, two = 2.0f;
  switch (value) {
  case 0:
    __ xorps(xmm0, xmm0);
    break;
  case 1:
    __ movflt(xmm0, ExternalAddress((address) &one));
    break;
  case 2:
    __ movflt(xmm0, ExternalAddress((address) &two));
    break;
  default:
    ShouldNotReachHere();
    break;
  }
}

void TemplateTable::dconst(int value) {
  transition(vtos, dtos);
  static double one = 1.0;
  switch (value) {
  case 0:
    __ xorpd(xmm0, xmm0);
    break;
  case 1:
    __ movdbl(xmm0, ExternalAddress((address) &one));
    break;
  default:
    ShouldNotReachHere();
    break;
  }
}

void TemplateTable::bipush() {
  transition(vtos, itos);
  __ load_signed_byte(rax, at_bcp(1));
}

void TemplateTable::sipush() {
  transition(vtos, itos);
  __ load_unsigned_short(rax, at_bcp(1));
  __ bswapl(rax);
  __ sarl(rax, 16);
}

void TemplateTable::ldc(bool wide) {
  transition(vtos, vtos);
  Label call_ldc, notFloat, notClass, Done;

  if (wide) {
    __ get_unsigned_2_byte_index_at_bcp(rbx, 1);
  } else {
    __ load_unsigned_byte(rbx, at_bcp(1));
  }

  __ get_cpool_and_tags(rcx, rax);
  const int base_offset = constantPoolOopDesc::header_size() * wordSize;
  const int tags_offset = typeArrayOopDesc::header_size(T_BYTE) * wordSize;

  // get type
  __ movzbl(rdx, Address(rax, rbx, Address::times_1, tags_offset));

  // unresolved string - get the resolved string
  __ cmpl(rdx, JVM_CONSTANT_UnresolvedString);
  __ jccb(Assembler::equal, call_ldc);

  // unresolved class - get the resolved class
  __ cmpl(rdx, JVM_CONSTANT_UnresolvedClass);
  __ jccb(Assembler::equal, call_ldc);

  // unresolved class in error state - call into runtime to throw the error
  // from the first resolution attempt
  __ cmpl(rdx, JVM_CONSTANT_UnresolvedClassInError);
  __ jccb(Assembler::equal, call_ldc);

  // resolved class - need to call vm to get java mirror of the class
  __ cmpl(rdx, JVM_CONSTANT_Class);
  __ jcc(Assembler::notEqual, notClass);

  __ bind(call_ldc);
  __ movl(c_rarg1, wide);
  call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::ldc), c_rarg1);
  __ push_ptr(rax);
  __ verify_oop(rax);
  __ jmp(Done);

  __ bind(notClass);
  __ cmpl(rdx, JVM_CONSTANT_Float);
  __ jccb(Assembler::notEqual, notFloat);
  // ftos
  __ movflt(xmm0, Address(rcx, rbx, Address::times_8, base_offset));
  __ push_f();
  __ jmp(Done);

  __ bind(notFloat);
#ifdef ASSERT
  {
    Label L;
    __ cmpl(rdx, JVM_CONSTANT_Integer);
    __ jcc(Assembler::equal, L);
    __ cmpl(rdx, JVM_CONSTANT_String);
    __ jcc(Assembler::equal, L);
    __ cmpl(rdx, JVM_CONSTANT_Object);
    __ jcc(Assembler::equal, L);
    __ stop("unexpected tag type in ldc");
    __ bind(L);
  }
#endif
  // atos and itos
  Label isOop;
  __ cmpl(rdx, JVM_CONSTANT_Integer);
  __ jcc(Assembler::notEqual, isOop);
  __ movl(rax, Address(rcx, rbx, Address::times_8, base_offset));
  __ push_i(rax);
  __ jmp(Done);

  __ bind(isOop);
  __ movptr(rax, Address(rcx, rbx, Address::times_8, base_offset));
  __ push_ptr(rax);

  if (VerifyOops) {
    __ verify_oop(rax);
  }

  __ bind(Done);
}
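
// Note on the addressing above: get_cpool_and_tags leaves the
// constantPoolOop in rcx and its tags byte array in rax, so tag lookups
// are scaled with times_1 while constant-pool entries, one word (8 bytes)
// each on amd64, are scaled with times_8.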

// Fast path for caching oop constants.
// %%% We should use this to handle Class and String constants also.
// %%% It will simplify the ldc/primitive path considerably.
void TemplateTable::fast_aldc(bool wide) {
  transition(vtos, atos);

  if (!EnableInvokeDynamic) {
    // We should not encounter this bytecode if !EnableInvokeDynamic.
    // The verifier will stop it.  However, if we get past the verifier,
    // this will stop the thread in a reasonable way, without crashing the JVM.
    __ call_VM(noreg, CAST_FROM_FN_PTR(address,
                      InterpreterRuntime::throw_IncompatibleClassChangeError));
    // the call_VM checks for exception, so we should never return here.
    __ should_not_reach_here();
    return;
  }

  const Register cache = rcx;
  const Register index = rdx;

  resolve_cache_and_index(f12_oop, rax, cache, index, wide ? sizeof(u2) : sizeof(u1));
  if (VerifyOops) {
    __ verify_oop(rax);
  }

  Label L_done, L_throw_exception;
  const Register con_klass_temp = rcx;    // same as cache
  const Register array_klass_temp = rdx;  // same as index
  __ load_klass(con_klass_temp, rax);
  __ lea(array_klass_temp, ExternalAddress((address)Universe::systemObjArrayKlassObj_addr()));
  __ cmpptr(con_klass_temp, Address(array_klass_temp, 0));
  __ jcc(Assembler::notEqual, L_done);
  __ cmpl(Address(rax, arrayOopDesc::length_offset_in_bytes()), 0);
  __ jcc(Assembler::notEqual, L_throw_exception);
  __ xorptr(rax, rax);
  __ jmp(L_done);

  // Load the exception from the system-array which wraps it:
  __ bind(L_throw_exception);
  __ load_heap_oop(rax, Address(rax, arrayOopDesc::base_offset_in_bytes(T_OBJECT)));
  __ jump(ExternalAddress(Interpreter::throw_exception_entry()));

  __ bind(L_done);
}

void TemplateTable::ldc2_w() {
  transition(vtos, vtos);
  Label Long, Done;
  __ get_unsigned_2_byte_index_at_bcp(rbx, 1);

  __ get_cpool_and_tags(rcx, rax);
  const int base_offset = constantPoolOopDesc::header_size() * wordSize;
  const int tags_offset = typeArrayOopDesc::header_size(T_BYTE) * wordSize;

  // get type
  __ cmpb(Address(rax, rbx, Address::times_1, tags_offset),
          JVM_CONSTANT_Double);
  __ jccb(Assembler::notEqual, Long);
  // dtos
  __ movdbl(xmm0, Address(rcx, rbx, Address::times_8, base_offset));
  __ push_d();
  __ jmpb(Done);

  __ bind(Long);
  // ltos
  __ movq(rax, Address(rcx, rbx, Address::times_8, base_offset));
  __ push_l();

  __ bind(Done);
}

void TemplateTable::locals_index(Register reg, int offset) {
  __ load_unsigned_byte(reg, at_bcp(offset));
  __ negptr(reg);
}
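
// locals_index negates the slot number because locals live at decreasing
// addresses from r14: with the negated index, iaddress(Register) scales by
// times_8 so that local n is addressed at r14 - n*wordSize, consistent
// with the iaddress(int)/laddress(int) forms defined at the top of this
// file.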

void TemplateTable::iload() {
  transition(vtos, itos);
  if (RewriteFrequentPairs) {
    Label rewrite, done;
    const Register bc = c_rarg3;
    assert(rbx != bc, "register damaged");

    // get next byte
    __ load_unsigned_byte(rbx,
                          at_bcp(Bytecodes::length_for(Bytecodes::_iload)));
    // if _iload, wait to rewrite to fast_iload2.  We only want to rewrite
    // the last two iloads in a pair.  Comparing against fast_iload means
    // that the next bytecode is neither an iload nor a caload, and
    // therefore an iload pair.
    __ cmpl(rbx, Bytecodes::_iload);
    __ jcc(Assembler::equal, done);

    __ cmpl(rbx, Bytecodes::_fast_iload);
    __ movl(bc, Bytecodes::_fast_iload2);
    __ jccb(Assembler::equal, rewrite);

    // if _caload, rewrite to fast_icaload
    __ cmpl(rbx, Bytecodes::_caload);
    __ movl(bc, Bytecodes::_fast_icaload);
    __ jccb(Assembler::equal, rewrite);

    // rewrite so iload doesn't check again.
    __ movl(bc, Bytecodes::_fast_iload);

    // rewrite
    // bc: fast bytecode
    __ bind(rewrite);
    patch_bytecode(Bytecodes::_iload, bc, rbx, false);
    __ bind(done);
  }

  // Get the local value into tos
  locals_index(rbx);
  __ movl(rax, iaddress(rbx));
}

void TemplateTable::fast_iload2() {
  transition(vtos, itos);
  locals_index(rbx);
  __ movl(rax, iaddress(rbx));
  __ push(itos);
  locals_index(rbx, 3);
  __ movl(rax, iaddress(rbx));
}

void TemplateTable::fast_iload() {
  transition(vtos, itos);
  locals_index(rbx);
  __ movl(rax, iaddress(rbx));
}

void TemplateTable::lload() {
  transition(vtos, ltos);
  locals_index(rbx);
  __ movq(rax, laddress(rbx));
}

void TemplateTable::fload() {
  transition(vtos, ftos);
  locals_index(rbx);
  __ movflt(xmm0, faddress(rbx));
}

void TemplateTable::dload() {
  transition(vtos, dtos);
  locals_index(rbx);
  __ movdbl(xmm0, daddress(rbx));
}

void TemplateTable::aload() {
  transition(vtos, atos);
  locals_index(rbx);
  __ movptr(rax, aaddress(rbx));
}

void TemplateTable::locals_index_wide(Register reg) {
  __ movl(reg, at_bcp(2));
  __ bswapl(reg);
  __ shrl(reg, 16);
  __ negptr(reg);
}

void TemplateTable::wide_iload() {
  transition(vtos, itos);
  locals_index_wide(rbx);
  __ movl(rax, iaddress(rbx));
}

void TemplateTable::wide_lload() {
  transition(vtos, ltos);
  locals_index_wide(rbx);
  __ movq(rax, laddress(rbx));
}

void TemplateTable::wide_fload() {
  transition(vtos, ftos);
  locals_index_wide(rbx);
  __ movflt(xmm0, faddress(rbx));
}

void TemplateTable::wide_dload() {
  transition(vtos, dtos);
  locals_index_wide(rbx);
  __ movdbl(xmm0, daddress(rbx));
}

void TemplateTable::wide_aload() {
  transition(vtos, atos);
  locals_index_wide(rbx);
  __ movptr(rax, aaddress(rbx));
}

void TemplateTable::index_check(Register array, Register index) {
  // destroys rbx
  // check array
  __ null_check(array, arrayOopDesc::length_offset_in_bytes());
  // sign extend index for use by indexed load
  __ movl2ptr(index, index);
  // check index
  __ cmpl(index, Address(array, arrayOopDesc::length_offset_in_bytes()));
  if (index != rbx) {
    // ??? convention: move aberrant index into ebx for exception message
    assert(rbx != array, "different registers");
    __ movl(rbx, index);
  }
  __ jump_cc(Assembler::aboveEqual,
             ExternalAddress(Interpreter::_throw_ArrayIndexOutOfBoundsException_entry));
}
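
// Two details of index_check worth noting: the cmpl/jump_cc pair uses the
// unsigned aboveEqual condition, so a negative index wraps to a huge
// unsigned value and takes the ArrayIndexOutOfBounds path as well; and the
// index is copied into rbx (the "aberrant index" convention above), which
// is why laload below can legitimately address the element through rbx.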

void TemplateTable::iaload() {
  transition(itos, itos);
  __ pop_ptr(rdx);
  // eax: index
  // rdx: array
  index_check(rdx, rax);  // kills rbx
  __ movl(rax, Address(rdx, rax,
                       Address::times_4,
                       arrayOopDesc::base_offset_in_bytes(T_INT)));
}

void TemplateTable::laload() {
  transition(itos, ltos);
  __ pop_ptr(rdx);
  // eax: index
  // rdx: array
  index_check(rdx, rax);  // kills rbx
  __ movq(rax, Address(rdx, rbx,
                       Address::times_8,
                       arrayOopDesc::base_offset_in_bytes(T_LONG)));
}

void TemplateTable::faload() {
  transition(itos, ftos);
  __ pop_ptr(rdx);
  // eax: index
  // rdx: array
  index_check(rdx, rax);  // kills rbx
  __ movflt(xmm0, Address(rdx, rax,
                          Address::times_4,
                          arrayOopDesc::base_offset_in_bytes(T_FLOAT)));
}

void TemplateTable::daload() {
  transition(itos, dtos);
  __ pop_ptr(rdx);
  // eax: index
  // rdx: array
  index_check(rdx, rax);  // kills rbx
  __ movdbl(xmm0, Address(rdx, rax,
                          Address::times_8,
                          arrayOopDesc::base_offset_in_bytes(T_DOUBLE)));
}

void TemplateTable::aaload() {
  transition(itos, atos);
  __ pop_ptr(rdx);
  // eax: index
  // rdx: array
  index_check(rdx, rax);  // kills rbx
  __ load_heap_oop(rax, Address(rdx, rax,
                                UseCompressedOops ? Address::times_4 : Address::times_8,
                                arrayOopDesc::base_offset_in_bytes(T_OBJECT)));
}
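
// aaload picks its scale factor at code-generation time: with compressed
// oops the object array holds 4-byte narrowOops (hence times_4), and
// load_heap_oop takes care of decoding the narrowOop into a full pointer
// in rax.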

void TemplateTable::baload() {
  transition(itos, itos);
  __ pop_ptr(rdx);
  // eax: index
  // rdx: array
  index_check(rdx, rax);  // kills rbx
  __ load_signed_byte(rax,
                      Address(rdx, rax,
                              Address::times_1,
                              arrayOopDesc::base_offset_in_bytes(T_BYTE)));
}

void TemplateTable::caload() {
  transition(itos, itos);
  __ pop_ptr(rdx);
  // eax: index
  // rdx: array
  index_check(rdx, rax);  // kills rbx
  __ load_unsigned_short(rax,
                         Address(rdx, rax,
                                 Address::times_2,
                                 arrayOopDesc::base_offset_in_bytes(T_CHAR)));
}

// iload followed by caload frequent pair
void TemplateTable::fast_icaload() {
  transition(vtos, itos);
  // load index out of locals
  locals_index(rbx);
  __ movl(rax, iaddress(rbx));

  // eax: index
  // rdx: array
  __ pop_ptr(rdx);
  index_check(rdx, rax);  // kills rbx
  __ load_unsigned_short(rax,
                         Address(rdx, rax,
                                 Address::times_2,
                                 arrayOopDesc::base_offset_in_bytes(T_CHAR)));
}

void TemplateTable::saload() {
  transition(itos, itos);
  __ pop_ptr(rdx);
  // eax: index
  // rdx: array
  index_check(rdx, rax);  // kills rbx
  __ load_signed_short(rax,
                       Address(rdx, rax,
                               Address::times_2,
                               arrayOopDesc::base_offset_in_bytes(T_SHORT)));
}

void TemplateTable::iload(int n) {
  transition(vtos, itos);
  __ movl(rax, iaddress(n));
}

void TemplateTable::lload(int n) {
  transition(vtos, ltos);
  __ movq(rax, laddress(n));
}

void TemplateTable::fload(int n) {
  transition(vtos, ftos);
  __ movflt(xmm0, faddress(n));
}

void TemplateTable::dload(int n) {
  transition(vtos, dtos);
  __ movdbl(xmm0, daddress(n));
}

void TemplateTable::aload(int n) {
  transition(vtos, atos);
  __ movptr(rax, aaddress(n));
}

void TemplateTable::aload_0() {
  transition(vtos, atos);
  // According to bytecode histograms, the pairs:
  //
  // _aload_0, _fast_igetfield
  // _aload_0, _fast_agetfield
  // _aload_0, _fast_fgetfield
  //
  // occur frequently. If RewriteFrequentPairs is set, the (slow)
  // _aload_0 bytecode checks if the next bytecode is either
  // _fast_igetfield, _fast_agetfield or _fast_fgetfield and then
  // rewrites the current bytecode into a pair bytecode; otherwise it
  // rewrites the current bytecode into _fast_aload_0 that doesn't do
  // the pair check anymore.
  //
  // Note: If the next bytecode is _getfield, the rewrite must be
  // delayed, otherwise we may miss an opportunity for a pair.
  //
  // Also rewrite frequent pairs
  //   aload_0, aload_1
  //   aload_0, iload_1
  // These bytecodes with a small amount of code are most profitable
  // to rewrite
  if (RewriteFrequentPairs) {
    Label rewrite, done;
    const Register bc = c_rarg3;
    assert(rbx != bc, "register damaged");
    // get next byte
    __ load_unsigned_byte(rbx,
                          at_bcp(Bytecodes::length_for(Bytecodes::_aload_0)));

    // do actual aload_0
    aload(0);

    // if _getfield then wait with rewrite
    __ cmpl(rbx, Bytecodes::_getfield);
    __ jcc(Assembler::equal, done);

    // if _igetfield then rewrite to _fast_iaccess_0
    assert(Bytecodes::java_code(Bytecodes::_fast_iaccess_0) ==
           Bytecodes::_aload_0,
           "fix bytecode definition");
    __ cmpl(rbx, Bytecodes::_fast_igetfield);
    __ movl(bc, Bytecodes::_fast_iaccess_0);
    __ jccb(Assembler::equal, rewrite);

    // if _agetfield then rewrite to _fast_aaccess_0
    assert(Bytecodes::java_code(Bytecodes::_fast_aaccess_0) ==
           Bytecodes::_aload_0,
           "fix bytecode definition");
    __ cmpl(rbx, Bytecodes::_fast_agetfield);
    __ movl(bc, Bytecodes::_fast_aaccess_0);
    __ jccb(Assembler::equal, rewrite);

    // if _fgetfield then rewrite to _fast_faccess_0
    assert(Bytecodes::java_code(Bytecodes::_fast_faccess_0) ==
           Bytecodes::_aload_0,
           "fix bytecode definition");
    __ cmpl(rbx, Bytecodes::_fast_fgetfield);
    __ movl(bc, Bytecodes::_fast_faccess_0);
    __ jccb(Assembler::equal, rewrite);

    // else rewrite to _fast_aload_0
    assert(Bytecodes::java_code(Bytecodes::_fast_aload_0) ==
           Bytecodes::_aload_0,
           "fix bytecode definition");
    __ movl(bc, Bytecodes::_fast_aload_0);

    // rewrite
    // bc: fast bytecode
    __ bind(rewrite);
    patch_bytecode(Bytecodes::_aload_0, bc, rbx, false);

    __ bind(done);
  } else {
    aload(0);
  }
}
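
// Net effect (sketch): after the first execution, "aload_0; fast_igetfield"
// has been collapsed into the single pair bytecode _fast_iaccess_0, while a
// stand-alone aload_0 becomes _fast_aload_0 and never re-runs the
// next-bytecode inspection above.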

void TemplateTable::istore() {
  transition(itos, vtos);
  locals_index(rbx);
  __ movl(iaddress(rbx), rax);
}

void TemplateTable::lstore() {
  transition(ltos, vtos);
  locals_index(rbx);
  __ movq(laddress(rbx), rax);
}

void TemplateTable::fstore() {
  transition(ftos, vtos);
  locals_index(rbx);
  __ movflt(faddress(rbx), xmm0);
}

void TemplateTable::dstore() {
  transition(dtos, vtos);
  locals_index(rbx);
  __ movdbl(daddress(rbx), xmm0);
}

void TemplateTable::astore() {
  transition(vtos, vtos);
  __ pop_ptr(rax);
  locals_index(rbx);
  __ movptr(aaddress(rbx), rax);
}

void TemplateTable::wide_istore() {
  transition(vtos, vtos);
  __ pop_i();
  locals_index_wide(rbx);
  __ movl(iaddress(rbx), rax);
}

void TemplateTable::wide_lstore() {
  transition(vtos, vtos);
  __ pop_l();
  locals_index_wide(rbx);
  __ movq(laddress(rbx), rax);
}

void TemplateTable::wide_fstore() {
  transition(vtos, vtos);
  __ pop_f();
  locals_index_wide(rbx);
  __ movflt(faddress(rbx), xmm0);
}

void TemplateTable::wide_dstore() {
  transition(vtos, vtos);
  __ pop_d();
  locals_index_wide(rbx);
  __ movdbl(daddress(rbx), xmm0);
}

void TemplateTable::wide_astore() {
  transition(vtos, vtos);
  __ pop_ptr(rax);
  locals_index_wide(rbx);
  __ movptr(aaddress(rbx), rax);
}

void TemplateTable::iastore() {
  transition(itos, vtos);
  __ pop_i(rbx);
  __ pop_ptr(rdx);
  // eax: value
  // ebx: index
  // rdx: array
  index_check(rdx, rbx);  // prefer index in ebx
  __ movl(Address(rdx, rbx,
                  Address::times_4,
                  arrayOopDesc::base_offset_in_bytes(T_INT)),
          rax);
}

void TemplateTable::lastore() {
  transition(ltos, vtos);
  __ pop_i(rbx);
  __ pop_ptr(rdx);
  // rax: value
  // ebx: index
  // rdx: array
  index_check(rdx, rbx);  // prefer index in ebx
  __ movq(Address(rdx, rbx,
                  Address::times_8,
                  arrayOopDesc::base_offset_in_bytes(T_LONG)),
          rax);
}

void TemplateTable::fastore() {
  transition(ftos, vtos);
  __ pop_i(rbx);
  __ pop_ptr(rdx);
  // xmm0: value
  // ebx:  index
  // rdx:  array
  index_check(rdx, rbx);  // prefer index in ebx
  __ movflt(Address(rdx, rbx,
                    Address::times_4,
                    arrayOopDesc::base_offset_in_bytes(T_FLOAT)),
            xmm0);
}

void TemplateTable::dastore() {
  transition(dtos, vtos);
  __ pop_i(rbx);
  __ pop_ptr(rdx);
  // xmm0: value
  // ebx:  index
  // rdx:  array
  index_check(rdx, rbx);  // prefer index in ebx
  __ movdbl(Address(rdx, rbx,
                    Address::times_8,
                    arrayOopDesc::base_offset_in_bytes(T_DOUBLE)),
            xmm0);
}
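
// The object-array store below differs from the primitive stores above: it
// is vtos because the value has to remain on the expression stack across
// the subtype check (at_tos/at_tos_p1/at_tos_p2 peek at value, index and
// array), and the three slots are only popped once the store is done.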

void TemplateTable::aastore() {
  Label is_null, ok_is_subtype, done;
  transition(vtos, vtos);
  // stack: ..., array, index, value
  __ movptr(rax, at_tos());    // value
  __ movl(rcx, at_tos_p1());   // index
  __ movptr(rdx, at_tos_p2()); // array

  Address element_address(rdx, rcx,
                          UseCompressedOops ? Address::times_4 : Address::times_8,
                          arrayOopDesc::base_offset_in_bytes(T_OBJECT));

  index_check(rdx, rcx);  // kills rbx
  // do array store check - check for NULL value first
  __ testptr(rax, rax);
  __ jcc(Assembler::zero, is_null);

  // Move subklass into rbx
  __ load_klass(rbx, rax);
  // Move superklass into rax
  __ load_klass(rax, rdx);
  __ movptr(rax, Address(rax,
                         objArrayKlass::element_klass_offset()));
  // Compress array + index*oopSize + 12 into a single register.  Frees rcx.
  __ lea(rdx, element_address);

  // Generate subtype check.  Blows rcx, rdi
  // Superklass in rax.  Subklass in rbx.
  __ gen_subtype_check(rbx, ok_is_subtype);

  // Come here on failure
  // object is at TOS
  __ jump(ExternalAddress(Interpreter::_throw_ArrayStoreException_entry));

  // Come here on success
  __ bind(ok_is_subtype);

  // Get the value we will store
  __ movptr(rax, at_tos());
  // Now store using the appropriate barrier
  do_oop_store(_masm, Address(rdx, 0), rax, _bs->kind(), true);
  __ jmp(done);

  // Have a NULL in rax, rdx=array, ecx=index.  Store NULL at ary[idx]
  __ bind(is_null);
  __ profile_null_seen(rbx);

  // Store a NULL
  do_oop_store(_masm, element_address, noreg, _bs->kind(), true);

  // Pop stack arguments
  __ bind(done);
  __ addptr(rsp, 3 * Interpreter::stackElementSize);
}

void TemplateTable::bastore() {
  transition(itos, vtos);
  __ pop_i(rbx);
  __ pop_ptr(rdx);
  // eax: value
  // ebx: index
  // rdx: array
  index_check(rdx, rbx);  // prefer index in ebx
  __ movb(Address(rdx, rbx,
                  Address::times_1,
                  arrayOopDesc::base_offset_in_bytes(T_BYTE)),
          rax);
}

void TemplateTable::castore() {
  transition(itos, vtos);
  __ pop_i(rbx);
  __ pop_ptr(rdx);
  // eax: value
  // ebx: index
  // rdx: array
  index_check(rdx, rbx);  // prefer index in ebx
  __ movw(Address(rdx, rbx,
                  Address::times_2,
                  arrayOopDesc::base_offset_in_bytes(T_CHAR)),
          rax);
}

void TemplateTable::sastore() {
  castore();
}

void TemplateTable::istore(int n) {
  transition(itos, vtos);
  __ movl(iaddress(n), rax);
}

void TemplateTable::lstore(int n) {
  transition(ltos, vtos);
  __ movq(laddress(n), rax);
}

void TemplateTable::fstore(int n) {
  transition(ftos, vtos);
  __ movflt(faddress(n), xmm0);
}

void TemplateTable::dstore(int n) {
  transition(dtos, vtos);
  __ movdbl(daddress(n), xmm0);
}

void TemplateTable::astore(int n) {
  transition(vtos, vtos);
  __ pop_ptr(rax);
  __ movptr(aaddress(n), rax);
}

void TemplateTable::pop() {
  transition(vtos, vtos);
  __ addptr(rsp, Interpreter::stackElementSize);
}

void TemplateTable::pop2() {
  transition(vtos, vtos);
  __ addptr(rsp, 2 * Interpreter::stackElementSize);
}

void TemplateTable::dup() {
  transition(vtos, vtos);
  __ load_ptr(0, rax);
  __ push_ptr(rax);
  // stack: ..., a, a
}

void TemplateTable::dup_x1() {
  transition(vtos, vtos);
  // stack: ..., a, b
  __ load_ptr( 0, rax);  // load b
  __ load_ptr( 1, rcx);  // load a
  __ store_ptr(1, rax);  // store b
  __ store_ptr(0, rcx);  // store a
  __ push_ptr(rax);      // push b
  // stack: ..., b, a, b
}

void TemplateTable::dup_x2() {
  transition(vtos, vtos);
  // stack: ..., a, b, c
  __ load_ptr( 0, rax);  // load c
  __ load_ptr( 2, rcx);  // load a
  __ store_ptr(2, rax);  // store c in a
  __ push_ptr(rax);      // push c
  // stack: ..., c, b, c, c
  __ load_ptr( 2, rax);  // load b
  __ store_ptr(2, rcx);  // store a in b
  // stack: ..., c, a, c, c
  __ store_ptr(1, rax);  // store b in c
  // stack: ..., c, a, b, c
}

void TemplateTable::dup2() {
  transition(vtos, vtos);
  // stack: ..., a, b
  __ load_ptr(1, rax);  // load a
  __ push_ptr(rax);     // push a
  __ load_ptr(1, rax);  // load b
  __ push_ptr(rax);     // push b
  // stack: ..., a, b, a, b
}

void TemplateTable::dup2_x1() {
  transition(vtos, vtos);
  // stack: ..., a, b, c
  __ load_ptr( 0, rcx);  // load c
  __ load_ptr( 1, rax);  // load b
  __ push_ptr(rax);      // push b
  __ push_ptr(rcx);      // push c
  // stack: ..., a, b, c, b, c
  __ store_ptr(3, rcx);  // store c in b
  // stack: ..., a, c, c, b, c
  __ load_ptr( 4, rcx);  // load a
  __ store_ptr(2, rcx);  // store a in 2nd c
  // stack: ..., a, c, a, b, c
  __ store_ptr(4, rax);  // store b in a
  // stack: ..., b, c, a, b, c
}

void TemplateTable::dup2_x2() {
  transition(vtos, vtos);
  // stack: ..., a, b, c, d
  __ load_ptr( 0, rcx);  // load d
  __ load_ptr( 1, rax);  // load c
  __ push_ptr(rax);      // push c
  __ push_ptr(rcx);      // push d
  // stack: ..., a, b, c, d, c, d
  __ load_ptr( 4, rax);  // load b
  __ store_ptr(2, rax);  // store b in d
  __ store_ptr(4, rcx);  // store d in b
  // stack: ..., a, d, c, b, c, d
  __ load_ptr( 5, rcx);  // load a
  __ load_ptr( 3, rax);  // load c
  __ store_ptr(3, rcx);  // store a in c
  __ store_ptr(5, rax);  // store c in a
  // stack: ..., c, d, a, b, c, d
}

void TemplateTable::swap() {
  transition(vtos, vtos);
  // stack: ..., a, b
  __ load_ptr( 1, rcx);  // load a
  __ load_ptr( 0, rax);  // load b
  __ store_ptr(0, rcx);  // store a in b
  __ store_ptr(1, rax);  // store b in a
  // stack: ..., b, a
}
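
// The dup/dup_x*/swap family above shuffles raw, untyped expression-stack
// slots via load_ptr/store_ptr; that is why dup2 can duplicate either two
// category-1 values or one category-2 (long/double) value with the same
// code.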

void TemplateTable::iop2(Operation op) {
  transition(itos, itos);
  switch (op) {
  case add  :                    __ pop_i(rdx); __ addl (rax, rdx); break;
  case sub  : __ movl(rdx, rax); __ pop_i(rax); __ subl (rax, rdx); break;
  case mul  :                    __ pop_i(rdx); __ imull(rax, rdx); break;
  case _and :                    __ pop_i(rdx); __ andl (rax, rdx); break;
  case _or  :                    __ pop_i(rdx); __ orl  (rax, rdx); break;
  case _xor :                    __ pop_i(rdx); __ xorl (rax, rdx); break;
  case shl  : __ movl(rcx, rax); __ pop_i(rax); __ shll (rax);      break;
  case shr  : __ movl(rcx, rax); __ pop_i(rax); __ sarl (rax);      break;
  case ushr : __ movl(rcx, rax); __ pop_i(rax); __ shrl (rax);      break;
  default   : ShouldNotReachHere();
  }
}

void TemplateTable::lop2(Operation op) {
  transition(ltos, ltos);
  switch (op) {
  case add  :                   __ pop_l(rdx); __ addptr(rax, rdx); break;
  case sub  : __ mov(rdx, rax); __ pop_l(rax); __ subptr(rax, rdx); break;
  case _and :                   __ pop_l(rdx); __ andptr(rax, rdx); break;
  case _or  :                   __ pop_l(rdx); __ orptr (rax, rdx); break;
  case _xor :                   __ pop_l(rdx); __ xorptr(rax, rdx); break;
  default   : ShouldNotReachHere();
  }
}
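
// The shift cases above first move the count into rcx: the variable-count
// shift instructions (shll/sarl/shrl and the 64-bit forms used by
// lshl/lshr/lushr below) implicitly take their count in CL on x86.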

void TemplateTable::idiv() {
  transition(itos, itos);
  __ movl(rcx, rax);
  __ pop_i(rax);
  // Note: could xor eax and ecx and compare with (-1 ^ min_int).  If
  // they are not equal, one could do a normal division (no correction
  // needed), which may speed up this implementation for the common case.
  // (see also JVM spec., p.243 & p.271)
  __ corrected_idivl(rcx);
}

void TemplateTable::irem() {
  transition(itos, itos);
  __ movl(rcx, rax);
  __ pop_i(rax);
  // Note: could xor eax and ecx and compare with (-1 ^ min_int).  If
  // they are not equal, one could do a normal division (no correction
  // needed), which may speed up this implementation for the common case.
  // (see also JVM spec., p.243 & p.271)
  __ corrected_idivl(rcx);
  __ movl(rax, rdx);
}

void TemplateTable::lmul() {
  transition(ltos, ltos);
  __ pop_l(rdx);
  __ imulq(rax, rdx);
}

void TemplateTable::ldiv() {
  transition(ltos, ltos);
  __ mov(rcx, rax);
  __ pop_l(rax);
  // generate explicit div0 check
  __ testq(rcx, rcx);
  __ jump_cc(Assembler::zero,
             ExternalAddress(Interpreter::_throw_ArithmeticException_entry));
  // Note: could xor rax and rcx and compare with (-1 ^ min_int).  If
  // they are not equal, one could do a normal division (no correction
  // needed), which may speed up this implementation for the common case.
  // (see also JVM spec., p.243 & p.271)
  __ corrected_idivq(rcx);  // kills rbx
}

void TemplateTable::lrem() {
  transition(ltos, ltos);
  __ mov(rcx, rax);
  __ pop_l(rax);
  __ testq(rcx, rcx);
  __ jump_cc(Assembler::zero,
             ExternalAddress(Interpreter::_throw_ArithmeticException_entry));
  // Note: could xor rax and rcx and compare with (-1 ^ min_int).  If
  // they are not equal, one could do a normal division (no correction
  // needed), which may speed up this implementation for the common case.
  // (see also JVM spec., p.243 & p.271)
  __ corrected_idivq(rcx);  // kills rbx
  __ mov(rax, rdx);
}
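
// corrected_idivl/corrected_idivq exist because the hardware idiv
// instruction raises #DE not only on division by zero but also for
// min_int / -1 (resp. min_long / -1); the corrected versions special-case
// that operand pair so the quotient is min_int/min_long and the remainder
// is 0, as the JVM specification requires.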

void TemplateTable::lshl() {
  transition(itos, ltos);
  __ movl(rcx, rax);  // get shift count
  __ pop_l(rax);      // get shift value
  __ shlq(rax);
}

void TemplateTable::lshr() {
  transition(itos, ltos);
  __ movl(rcx, rax);  // get shift count
  __ pop_l(rax);      // get shift value
  __ sarq(rax);
}

void TemplateTable::lushr() {
  transition(itos, ltos);
  __ movl(rcx, rax);  // get shift count
  __ pop_l(rax);      // get shift value
  __ shrq(rax);
}

void TemplateTable::fop2(Operation op) {
  transition(ftos, ftos);
  switch (op) {
  case add:
    __ addss(xmm0, at_rsp());
    __ addptr(rsp, Interpreter::stackElementSize);
    break;
  case sub:
    __ movflt(xmm1, xmm0);
    __ pop_f(xmm0);
    __ subss(xmm0, xmm1);
    break;
  case mul:
    __ mulss(xmm0, at_rsp());
    __ addptr(rsp, Interpreter::stackElementSize);
    break;
  case div:
    __ movflt(xmm1, xmm0);
    __ pop_f(xmm0);
    __ divss(xmm0, xmm1);
    break;
  case rem:
    __ movflt(xmm1, xmm0);
    __ pop_f(xmm0);
    __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::frem), 2);
    break;
  default:
    ShouldNotReachHere();
    break;
  }
}

void TemplateTable::dop2(Operation op) {
  transition(dtos, dtos);
  switch (op) {
  case add:
    __ addsd(xmm0, at_rsp());
    __ addptr(rsp, 2 * Interpreter::stackElementSize);
    break;
  case sub:
    __ movdbl(xmm1, xmm0);
    __ pop_d(xmm0);
    __ subsd(xmm0, xmm1);
    break;
  case mul:
    __ mulsd(xmm0, at_rsp());
    __ addptr(rsp, 2 * Interpreter::stackElementSize);
    break;
  case div:
    __ movdbl(xmm1, xmm0);
    __ pop_d(xmm0);
    __ divsd(xmm0, xmm1);
    break;
  case rem:
    __ movdbl(xmm1, xmm0);
    __ pop_d(xmm0);
    __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::drem), 2);
    break;
  default:
    ShouldNotReachHere();
    break;
  }
}

void TemplateTable::ineg() {
  transition(itos, itos);
  __ negl(rax);
}

void TemplateTable::lneg() {
  transition(ltos, ltos);
  __ negq(rax);
}

// Note: 'double' and 'long long' have 32-bit alignment on x86.
static jlong* double_quadword(jlong *adr, jlong lo, jlong hi) {
  // Use the expression (adr)&(~0xF) to provide a 128-bit aligned address
  // for the 128-bit operands of SSE instructions.
  jlong *operand = (jlong*)(((intptr_t)adr)&((intptr_t)(~0xF)));
  // Store the value into the 128-bit operand.
  operand[0] = lo;
  operand[1] = hi;
  return operand;
}

// Buffer for 128-bit masks used by SSE instructions.
static jlong float_signflip_pool[2*2];
static jlong double_signflip_pool[2*2];

void TemplateTable::fneg() {
  transition(ftos, ftos);
  static jlong *float_signflip  = double_quadword(&float_signflip_pool[1], 0x8000000080000000, 0x8000000080000000);
  __ xorps(xmm0, ExternalAddress((address) float_signflip));
}

void TemplateTable::dneg() {
  transition(dtos, dtos);
  static jlong *double_signflip = double_quadword(&double_signflip_pool[1], 0x8000000000000000, 0x8000000000000000);
  __ xorpd(xmm0, ExternalAddress((address) double_signflip));
}
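
// The signflip pools are over-allocated (four jlongs each) because static
// storage is only guaranteed 8-byte alignment: double_quadword rounds the
// address of element [1] down to the nearest 16-byte boundary, which is
// certain to leave both 8-byte halves of the mask inside the pool, so
// xorps/xorpd get a properly aligned 128-bit operand.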

void TemplateTable::iinc() {
  transition(vtos, vtos);
  __ load_signed_byte(rdx, at_bcp(2));  // get constant
  locals_index(rbx);
  __ addl(iaddress(rbx), rdx);
}

void TemplateTable::wide_iinc() {
  transition(vtos, vtos);
  __ movl(rdx, at_bcp(4));  // get constant
  locals_index_wide(rbx);
  __ bswapl(rdx);           // swap bytes & sign-extend constant
  __ sarl(rdx, 16);
  __ addl(iaddress(rbx), rdx);
  // Note: should probably use only one movl to get both
  //       the index and the constant -> fix this
}

void TemplateTable::convert() {
  // Checking
#ifdef ASSERT
  {
    TosState tos_in  = ilgl;
    TosState tos_out = ilgl;
    switch (bytecode()) {
    case Bytecodes::_i2l: // fall through
    case Bytecodes::_i2f: // fall through
    case Bytecodes::_i2d: // fall through
    case Bytecodes::_i2b: // fall through
    case Bytecodes::_i2c: // fall through
    case Bytecodes::_i2s: tos_in = itos; break;
    case Bytecodes::_l2i: // fall through
    case Bytecodes::_l2f: // fall through
    case Bytecodes::_l2d: tos_in = ltos; break;
    case Bytecodes::_f2i: // fall through
    case Bytecodes::_f2l: // fall through
    case Bytecodes::_f2d: tos_in = ftos; break;
    case Bytecodes::_d2i: // fall through
    case Bytecodes::_d2l: // fall through
    case Bytecodes::_d2f: tos_in = dtos; break;
    default             : ShouldNotReachHere();
    }
    switch (bytecode()) {
    case Bytecodes::_l2i: // fall through
    case Bytecodes::_f2i: // fall through
    case Bytecodes::_d2i: // fall through
    case Bytecodes::_i2b: // fall through
    case Bytecodes::_i2c: // fall through
    case Bytecodes::_i2s: tos_out = itos; break;
    case Bytecodes::_i2l: // fall through
    case Bytecodes::_f2l: // fall through
    case Bytecodes::_d2l: tos_out = ltos; break;
    case Bytecodes::_i2f: // fall through
    case Bytecodes::_l2f: // fall through
    case Bytecodes::_d2f: tos_out = ftos; break;
    case Bytecodes::_i2d: // fall through
    case Bytecodes::_l2d: // fall through
    case Bytecodes::_f2d: tos_out = dtos; break;
    default             : ShouldNotReachHere();
    }
    transition(tos_in, tos_out);
  }
#endif // ASSERT

  static const int64_t is_nan = 0x8000000000000000L;

  // Conversion
  switch (bytecode()) {
  case Bytecodes::_i2l:
    __ movslq(rax, rax);
    break;
  case Bytecodes::_i2f:
    __ cvtsi2ssl(xmm0, rax);
    break;
  case Bytecodes::_i2d:
    __ cvtsi2sdl(xmm0, rax);
    break;
  case Bytecodes::_i2b:
    __ movsbl(rax, rax);
    break;
  case Bytecodes::_i2c:
    __ movzwl(rax, rax);
    break;
  case Bytecodes::_i2s:
    __ movswl(rax, rax);
    break;
  case Bytecodes::_l2i:
    __ movl(rax, rax);
    break;
  case Bytecodes::_l2f:
    __ cvtsi2ssq(xmm0, rax);
    break;
  case Bytecodes::_l2d:
    __ cvtsi2sdq(xmm0, rax);
    break;
  case Bytecodes::_f2i:
  {
    Label L;
    __ cvttss2sil(rax, xmm0);
    __ cmpl(rax, 0x80000000);  // NaN or overflow/underflow?
    __ jcc(Assembler::notEqual, L);
    __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::f2i), 1);
    __ bind(L);
  }
    break;
  case Bytecodes::_f2l:
  {
    Label L;
    __ cvttss2siq(rax, xmm0);
    // NaN or overflow/underflow?
    __ cmp64(rax, ExternalAddress((address) &is_nan));
    __ jcc(Assembler::notEqual, L);
    __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::f2l), 1);
    __ bind(L);
  }
    break;
  case Bytecodes::_f2d:
    __ cvtss2sd(xmm0, xmm0);
    break;
  case Bytecodes::_d2i:
  {
    Label L;
    __ cvttsd2sil(rax, xmm0);
    __ cmpl(rax, 0x80000000);  // NaN or overflow/underflow?
    __ jcc(Assembler::notEqual, L);
    __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::d2i), 1);
    __ bind(L);
  }
    break;
  case Bytecodes::_d2l:
  {
    Label L;
    __ cvttsd2siq(rax, xmm0);
    // NaN or overflow/underflow?
    __ cmp64(rax, ExternalAddress((address) &is_nan));
    __ jcc(Assembler::notEqual, L);
    __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::d2l), 1);
    __ bind(L);
  }
    break;
  case Bytecodes::_d2f:
    __ cvtsd2ss(xmm0, xmm0);
    break;
  default:
    ShouldNotReachHere();
  }
}
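
// The f2i/f2l/d2i/d2l cases above lean on cvttss2si/cvttsd2si returning
// the "integer indefinite" value (0x80000000, or 0x8000000000000000 for
// the quadword forms) for NaN and out-of-range inputs; only when that
// sentinel comes back does the template call the SharedRuntime helper to
// produce the result the JLS mandates.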

void TemplateTable::lcmp() {
  transition(ltos, itos);
  Label done;
  __ pop_l(rdx);
  __ cmpq(rdx, rax);
  __ movl(rax, -1);
  __ jccb(Assembler::less, done);
  __ setb(Assembler::notEqual, rax);
  __ movzbl(rax, rax);
  __ bind(done);
}

void TemplateTable::float_cmp(bool is_float, int unordered_result) {
  Label done;
  if (is_float) {
    // XXX get rid of pop here, use ... reg, mem32
    __ pop_f(xmm1);
    __ ucomiss(xmm1, xmm0);
  } else {
    // XXX get rid of pop here, use ... reg, mem64
    __ pop_d(xmm1);
    __ ucomisd(xmm1, xmm0);
  }
  if (unordered_result < 0) {
    __ movl(rax, -1);
    __ jccb(Assembler::parity, done);
    __ jccb(Assembler::below, done);
    __ setb(Assembler::notEqual, rdx);
    __ movzbl(rax, rdx);
  } else {
    __ movl(rax, 1);
    __ jccb(Assembler::parity, done);
    __ jccb(Assembler::above, done);
    __ movl(rax, 0);
    __ jccb(Assembler::equal, done);
    __ decrementl(rax);
  }
  __ bind(done);
}

void TemplateTable::branch(bool is_jsr, bool is_wide) {
  __ get_method(rcx);                // rcx holds method
  __ profile_taken_branch(rax, rbx); // rax holds updated MDP, rbx
                                     // holds bumped taken count

  const ByteSize be_offset = methodOopDesc::backedge_counter_offset() +
                             InvocationCounter::counter_offset();
  const ByteSize inv_offset = methodOopDesc::invocation_counter_offset() +
                              InvocationCounter::counter_offset();
  const int method_offset = frame::interpreter_frame_method_offset * wordSize;

  // Load up edx with the branch displacement
  __ movl(rdx, at_bcp(1));
  __ bswapl(rdx);

  if (!is_wide) {
    __ sarl(rdx, 16);
  }
  __ movl2ptr(rdx, rdx);

  // Handle all the JSR stuff here, then exit.
  // It's much shorter and cleaner than intermingling with the non-JSR
  // normal-branch stuff occurring below.
  if (is_jsr) {
    // Pre-load the next target bytecode into rbx
    __ load_unsigned_byte(rbx, Address(r13, rdx, Address::times_1, 0));

    // compute return address as bci in rax
    __ lea(rax, at_bcp((is_wide ? 5 : 3) -
                       in_bytes(constMethodOopDesc::codes_offset())));
    __ subptr(rax, Address(rcx, methodOopDesc::const_offset()));
    // Adjust the bcp in r13 by the displacement in rdx
    __ addptr(r13, rdx);
    // jsr returns atos that is not an oop
    __ push_i(rax);
    __ dispatch_only(vtos);
    return;
  }

  // Normal (non-jsr) branch handling

  // Adjust the bcp in r13 by the displacement in rdx
  __ addptr(r13, rdx);

  assert(UseLoopCounter || !UseOnStackReplacement,
         "on-stack-replacement requires loop counters");
  Label backedge_counter_overflow;
  Label profile_method;
  Label dispatch;
  if (UseLoopCounter) {
    // increment backedge counter for backward branches
    // rax: MDO
    // ebx: MDO bumped taken-count
    // rcx: method
    // rdx: target offset
    // r13: target bcp
    // r14: locals pointer
    __ testl(rdx, rdx);                    // check if forward or backward branch
    __ jcc(Assembler::positive, dispatch); // count only if backward branch
    if (TieredCompilation) {
      Label no_mdo;
      int increment = InvocationCounter::count_increment;
      int mask = ((1 << Tier0BackedgeNotifyFreqLog) - 1) << InvocationCounter::count_shift;
      if (ProfileInterpreter) {
        // Are we profiling?
        __ movptr(rbx, Address(rcx, in_bytes(methodOopDesc::method_data_offset())));
        __ testptr(rbx, rbx);
        __ jccb(Assembler::zero, no_mdo);
        // Increment the MDO backedge counter
        const Address mdo_backedge_counter(rbx, in_bytes(methodDataOopDesc::backedge_counter_offset()) +
                                           in_bytes(InvocationCounter::counter_offset()));
        __ increment_mask_and_jump(mdo_backedge_counter, increment, mask,
                                   rax, false, Assembler::zero, &backedge_counter_overflow);
        __ jmp(dispatch);
      }
      __ bind(no_mdo);
      // Increment backedge counter in methodOop
      __ increment_mask_and_jump(Address(rcx, be_offset), increment, mask,
                                 rax, false, Assembler::zero, &backedge_counter_overflow);
    } else {
      // increment counter
      __ movl(rax, Address(rcx, be_offset));                  // load backedge counter
      __ incrementl(rax, InvocationCounter::count_increment); // increment counter
      __ movl(Address(rcx, be_offset), rax);                  // store counter

      __ movl(rax, Address(rcx, inv_offset));                 // load invocation counter
      __ andl(rax, InvocationCounter::count_mask_value);      // and the status bits
      __ addl(rax, Address(rcx, be_offset));                  // add both counters

      if (ProfileInterpreter) {
        // Test to see if we should create a method data oop
        __ cmp32(rax,
                 ExternalAddress((address) &InvocationCounter::InterpreterProfileLimit));
        __ jcc(Assembler::less, dispatch);

        // if no method data exists, go to profile method
        __ test_method_data_pointer(rax, profile_method);

        if (UseOnStackReplacement) {
          // check for overflow against ebx which is the MDO taken count
          __ cmp32(rbx,
                   ExternalAddress((address) &InvocationCounter::InterpreterBackwardBranchLimit));
          __ jcc(Assembler::below, dispatch);

          // When ProfileInterpreter is on, the backedge_count comes
          // from the methodDataOop, whose value does not get reset on
          // the call to frequency_counter_overflow().  To avoid
          // excessive calls to the overflow routine while the method is
          // being compiled, add a second test to make sure the overflow
          // function is called only once every overflow_frequency.
          const int overflow_frequency = 1024;
          __ andl(rbx, overflow_frequency - 1);
          __ jcc(Assembler::zero, backedge_counter_overflow);

        }
      } else {
        if (UseOnStackReplacement) {
          // check for overflow against eax, which is the sum of the
          // counters
          __ cmp32(rax,
                   ExternalAddress((address) &InvocationCounter::InterpreterBackwardBranchLimit));
          __ jcc(Assembler::aboveEqual, backedge_counter_overflow);

        }
      }
    }
    __ bind(dispatch);
  }

  // Pre-load the next target bytecode into rbx
  __ load_unsigned_byte(rbx, Address(r13, 0));

  // continue with the bytecode @ target
  // eax: return bci for jsr's, unused otherwise
  // ebx: target bytecode
  // r13: target bcp
  __ dispatch_only(vtos);

  if (UseLoopCounter) {
    if (ProfileInterpreter) {
      // Out-of-line code to allocate method data oop.
      __ bind(profile_method);
      __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::profile_method));
      __ load_unsigned_byte(rbx, Address(r13, 0));  // restore target bytecode
      __ set_method_data_pointer_for_bcp();
      __ jmp(dispatch);
    }

    if (UseOnStackReplacement) {
      // invocation counter overflow
      __ bind(backedge_counter_overflow);
      __ negptr(rdx);
      __ addptr(rdx, r13);  // branch bcp
      // IcoResult frequency_counter_overflow([JavaThread*], address branch_bcp)
      __ call_VM(noreg,
                 CAST_FROM_FN_PTR(address,
                                  InterpreterRuntime::frequency_counter_overflow),
                 rdx);
      __ load_unsigned_byte(rbx, Address(r13, 0));  // restore target bytecode

      // rax: osr nmethod (osr ok) or NULL (osr not possible)
      // ebx: target bytecode
      // rdx: scratch
      // r14: locals pointer
      // r13: bcp
      __ testptr(rax, rax);              // test result
      __ jcc(Assembler::zero, dispatch); // no osr if null
      // nmethod may have been invalidated (VM may block upon call_VM return)
      __ movl(rcx, Address(rax, nmethod::entry_bci_offset()));
      __ cmpl(rcx, InvalidOSREntryBci);
      __ jcc(Assembler::equal, dispatch);

      // We have the address of an on stack replacement routine in rax.
      // We need to prepare to execute the OSR method.  First we must
      // migrate the locals and monitors off of the stack.

      __ mov(r13, rax);  // save the nmethod

      call_VM(noreg, CAST_FROM_FN_PTR(address, SharedRuntime::OSR_migration_begin));

      // rax is the OSR buffer, move it to the expected parameter location
      __ mov(j_rarg0, rax);

      // We use j_rarg definitions here so that registers don't conflict as parameter
      // registers change across platforms as we are in the midst of a calling
      // sequence to the OSR nmethod and we don't want collision.  These are NOT parameters.

      const Register retaddr = j_rarg2;
      const Register sender_sp = j_rarg1;

      // pop the interpreter frame
      __ movptr(sender_sp, Address(rbp, frame::interpreter_frame_sender_sp_offset * wordSize)); // get sender sp
      __ leave();                // remove frame anchor
      __ pop(retaddr);           // get return address
      __ mov(rsp, sender_sp);    // set sp to sender sp
      // Ensure compiled code always sees stack at proper alignment
      __ andptr(rsp, -(StackAlignmentInBytes));

      // unlike x86 we need no specialized return from compiled code
      // to the interpreter or the call stub.

      // push the return address
      __ push(retaddr);

      // and begin the OSR nmethod
      __ jmp(Address(r13, nmethod::osr_entry_point_offset()));
    }
  }
}


void TemplateTable::if_0cmp(Condition cc) {
  transition(itos, vtos);
  // assume branch is more often taken than not (loops use backward branches)
  Label not_taken;
  __ testl(rax, rax);
  __ jcc(j_not(cc), not_taken);
  branch(false, false);
  __ bind(not_taken);
  __ profile_not_taken_branch(rax);
}

void TemplateTable::if_icmp(Condition cc) {
  transition(itos, vtos);
  // assume branch is more often taken than not (loops use backward branches)
  Label not_taken;
  __ pop_i(rdx);
  __ cmpl(rdx, rax);
  __ jcc(j_not(cc), not_taken);
  branch(false, false);
  __ bind(not_taken);
  __ profile_not_taken_branch(rax);
}

void TemplateTable::if_nullcmp(Condition cc) {
  transition(atos, vtos);
  // assume branch is more often taken than not (loops use backward branches)
  Label not_taken;
  __ testptr(rax, rax);
  __ jcc(j_not(cc), not_taken);
  branch(false, false);
  __ bind(not_taken);
  __ profile_not_taken_branch(rax);
}

void TemplateTable::if_acmp(Condition cc) {
  transition(atos, vtos);
  // assume branch is more often taken than not (loops use backward branches)
  Label not_taken;
  __ pop_ptr(rdx);
  __ cmpptr(rdx, rax);
  __ jcc(j_not(cc), not_taken);
  branch(false, false);
  __ bind(not_taken);
  __ profile_not_taken_branch(rax);
}

void TemplateTable::ret() {
  transition(vtos, vtos);
  locals_index(rbx);
  __ movslq(rbx, iaddress(rbx));  // get return bci, compute return bcp
  __ profile_ret(rbx, rcx);
  __ get_method(rax);
  __ movptr(r13, Address(rax, methodOopDesc::const_offset()));
  __ lea(r13, Address(r13, rbx, Address::times_1,
                      constMethodOopDesc::codes_offset()));
  __ dispatch_next(vtos);
}

void TemplateTable::wide_ret() {
  transition(vtos, vtos);
  locals_index_wide(rbx);
  __ movptr(rbx, aaddress(rbx));  // get return bci, compute return bcp
  __ profile_ret(rbx, rcx);
  __ get_method(rax);
  __ movptr(r13, Address(rax, methodOopDesc::const_offset()));
  __ lea(r13, Address(r13, rbx, Address::times_1, constMethodOopDesc::codes_offset()));
  __ dispatch_next(vtos);
}
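
// ret/wide_ret store a bci, not a bcp, in the local variable (see the jsr
// handling in branch above, which pushes a bci): the code here therefore
// rebuilds r13 as constMethod + codes_offset() + bci before dispatching.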

void TemplateTable::tableswitch() {
  Label default_case, continue_execution;
  transition(itos, vtos);
  // align r13
  __ lea(rbx, at_bcp(BytesPerInt));
  __ andptr(rbx, -BytesPerInt);
  // load lo & hi
  __ movl(rcx, Address(rbx, BytesPerInt));
  __ movl(rdx, Address(rbx, 2 * BytesPerInt));
  __ bswapl(rcx);
  __ bswapl(rdx);
  // check against lo & hi
  __ cmpl(rax, rcx);
  __ jcc(Assembler::less, default_case);
  __ cmpl(rax, rdx);
  __ jcc(Assembler::greater, default_case);
  // lookup dispatch offset
  __ subl(rax, rcx);
  __ movl(rdx, Address(rbx, rax, Address::times_4, 3 * BytesPerInt));
  __ profile_switch_case(rax, rbx, rcx);
  // continue execution
  __ bind(continue_execution);
  __ bswapl(rdx);
  __ movl2ptr(rdx, rdx);
  __ load_unsigned_byte(rbx, Address(r13, rdx, Address::times_1));
  __ addptr(r13, rdx);
  __ dispatch_only(vtos);
  // handle default
  __ bind(default_case);
  __ profile_switch_default(rax);
  __ movl(rdx, Address(rbx, 0));
  __ jmp(continue_execution);
}

void TemplateTable::lookupswitch() {
  transition(itos, itos);
  __ stop("lookupswitch bytecode should have been rewritten");
}

void TemplateTable::fast_linearswitch() {
  transition(itos, vtos);
  Label loop_entry, loop, found, continue_execution;
  // bswap rax so we can avoid bswapping the table entries
  __ bswapl(rax);
  // align r13
  __ lea(rbx, at_bcp(BytesPerInt)); // btw: should be able to get rid of
                                    // this instruction (change offsets
                                    // below)
  __ andptr(rbx, -BytesPerInt);
  // set counter
  __ movl(rcx, Address(rbx, BytesPerInt));
  __ bswapl(rcx);
  __ jmpb(loop_entry);
  // table search
  __ bind(loop);
  __ cmpl(rax, Address(rbx, rcx, Address::times_8, 2 * BytesPerInt));
  __ jcc(Assembler::equal, found);
  __ bind(loop_entry);
  __ decrementl(rcx);
  __ jcc(Assembler::greaterEqual, loop);
  // default case
  __ profile_switch_default(rax);
  __ movl(rdx, Address(rbx, 0));
  __ jmp(continue_execution);
  // entry found -> get offset
  __ bind(found);
  __ movl(rdx, Address(rbx, rcx, Address::times_8, 3 * BytesPerInt));
  __ profile_switch_case(rcx, rax, rbx);
  // continue execution
  __ bind(continue_execution);
  __ bswapl(rdx);
  __ movl2ptr(rdx, rdx);
  __ load_unsigned_byte(rbx, Address(r13, rdx, Address::times_1));
  __ addptr(r13, rdx);
  __ dispatch_only(vtos);
}
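
// A lookupswitch table is a sequence of (match, offset) pairs, two
// BytesPerInt words each, which is why the switch routines above and below
// scale the pair index with Address::times_8 and address the match and
// offset fields at displacements 2*BytesPerInt and 3*BytesPerInt past the
// header (default offset and npairs).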
  //     int h = (i + j) >> 1;
  //     // i < h < j
  //     if (key < array[h].fast_match()) {
  //       j = h;
  //     } else {
  //       i = h;
  //     }
  //   }
  //   // R: a[i] <= key < a[i+1] or Q
  //   // (i.e., if key is within array, i is the correct index)
  //   return i;
  // }

  // Register allocation
  const Register key   = rax; // already set (tosca)
  const Register array = rbx;
  const Register i     = rcx;
  const Register j     = rdx;
  const Register h     = rdi;
  const Register temp  = rsi;

  // Find array start
  __ lea(array, at_bcp(3 * BytesPerInt)); // btw: should be able to
                                          // get rid of this
                                          // instruction (change
                                          // offsets below)
  __ andptr(array, -BytesPerInt);

  // Initialize i & j
  __ xorl(i, i);                            // i = 0;
  __ movl(j, Address(array, -BytesPerInt)); // j = length(array);

  // Convert j into native byte ordering
  __ bswapl(j);

  // And start
  Label entry;
  __ jmp(entry);

  // binary search loop
  {
    Label loop;
    __ bind(loop);
    // int h = (i + j) >> 1;
    __ leal(h, Address(i, j, Address::times_1)); // h = i + j;
    __ sarl(h, 1);                               // h = (i + j) >> 1;
    // if (key < array[h].fast_match()) {
    //   j = h;
    // } else {
    //   i = h;
    // }
    // Convert array[h].match to native byte-ordering before compare
    __ movl(temp, Address(array, h, Address::times_8));
    __ bswapl(temp);
    __ cmpl(key, temp);
    // j = h if (key <  array[h].fast_match())
    __ cmovl(Assembler::less, j, h);
    // i = h if (key >= array[h].fast_match())
    __ cmovl(Assembler::greaterEqual, i, h);
    // while (i+1 < j)
    __ bind(entry);
    __ leal(h, Address(i, 1)); // i+1
    __ cmpl(h, j);             // i+1 < j
    __ jcc(Assembler::less, loop);
  }

  // end of binary search, result index is i (must check again!)
  Label default_case;
  // Convert array[i].match to native byte-ordering before compare
  __ movl(temp, Address(array, i, Address::times_8));
  __ bswapl(temp);
  __ cmpl(key, temp);
  __ jcc(Assembler::notEqual, default_case);

  // entry found -> j = offset
  __ movl(j, Address(array, i, Address::times_8, BytesPerInt));
  __ profile_switch_case(i, key, array);
  __ bswapl(j);
  __ movl2ptr(j, j);
  __ load_unsigned_byte(rbx, Address(r13, j, Address::times_1));
  __ addptr(r13, j);
  __ dispatch_only(vtos);

  // default case -> j = default offset
  __ bind(default_case);
  __ profile_switch_default(i);
  __ movl(j, Address(array, -2 * BytesPerInt));
  __ bswapl(j);
  __ movl2ptr(j, j);
  __ load_unsigned_byte(rbx, Address(r13, j, Address::times_1));
  __ addptr(r13, j);
  __ dispatch_only(vtos);
}


void TemplateTable::_return(TosState state) {
  transition(state, state);
  assert(_desc->calls_vm(),
         "inconsistent calls_vm information"); // call in remove_activation

  if (_desc->bytecode() == Bytecodes::_return_register_finalizer) {
    assert(state == vtos, "only valid state");
    __ movptr(c_rarg1, aaddress(0));
    __ load_klass(rdi, c_rarg1);
    __ movl(rdi, Address(rdi, Klass::access_flags_offset()));
    __ testl(rdi, JVM_ACC_HAS_FINALIZER);
    Label skip_register_finalizer;
    __ jcc(Assembler::zero, skip_register_finalizer);

    __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::register_finalizer), c_rarg1);

    __ bind(skip_register_finalizer);
  }

  __ remove_activation(state, r13);
  __ jmp(r13);
}

// ----------------------------------------------------------------------------
// Volatile variables demand their effects be made known to all CPUs in
// order. Store buffers on most chips allow reads & writes to reorder; the
// JMM's ReadAfterWrite.java test fails in -Xint mode without some kind of
// memory barrier (i.e., it's not sufficient that the interpreter does not
// reorder volatile references, the hardware also must not reorder them).
//
// According to the new Java Memory Model (JMM):
// (1) All volatiles are serialized with respect to each other. ALSO reads &
//     writes act as acquire & release, so:
// (2) A read cannot let unrelated NON-volatile memory refs that
//     happen after the read float up to before the read. It's OK for
//     non-volatile memory refs that happen before the volatile read to
//     float down below it.
// (3) Similarly, a volatile write cannot let unrelated NON-volatile
//     memory refs that happen BEFORE the write float down to after the
//     write. It's OK for non-volatile memory refs that happen after the
//     volatile write to float up before it.
//
// We only put in barriers around volatile refs (they are expensive),
// not _between_ memory refs (that would require us to track the
// flavor of the previous memory refs). Requirements (2) and (3)
// require some barriers before volatile stores and after volatile
// loads. These nearly cover requirement (1) but miss the
// volatile-store-volatile-load case.  This final case is placed after
// volatile-stores although it could just as well go before
// volatile-loads.
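//
// For example, putfield_or_static and fast_storefield below follow a
// volatile store with
//   volatile_barrier(Assembler::Membar_mask_bits(Assembler::StoreLoad |
//                                                Assembler::StoreStore));
// which covers requirement (3) as well as the
// volatile-store-volatile-load case.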
void TemplateTable::volatile_barrier(Assembler::Membar_mask_bits
                                     order_constraint) {
  // Helper function to insert an is-volatile test and memory barrier
  if (os::is_MP()) { // Not needed on single CPU
    __ membar(order_constraint);
  }
}

void TemplateTable::resolve_cache_and_index(int byte_no,
                                            Register result,
                                            Register Rcache,
                                            Register index,
                                            size_t index_size) {
  const Register temp = rbx;
  assert_different_registers(result, Rcache, index, temp);

  Label resolved;
  if (byte_no == f12_oop) {
    // We are resolved if the f1 field contains a non-null object (CallSite, MethodType, etc.)
    // This kind of CP cache entry does not need to match bytecode_1 or bytecode_2, because
    // there is a 1-1 relation between bytecode type and CP entry type.
    // The caller will also load a methodOop from f2.
    assert(result != noreg, ""); // else do cmpptr(Address(...), (int32_t) NULL_WORD)
    __ get_cache_and_index_at_bcp(Rcache, index, 1, index_size);
    __ movptr(result, Address(Rcache, index, Address::times_ptr, constantPoolCacheOopDesc::base_offset() + ConstantPoolCacheEntry::f1_offset()));
    __ testptr(result, result);
    __ jcc(Assembler::notEqual, resolved);
  } else {
    assert(byte_no == f1_byte || byte_no == f2_byte, "byte_no out of range");
    assert(result == noreg, ""); // else change code for setting result
    __ get_cache_and_index_and_bytecode_at_bcp(Rcache, index, temp, byte_no, 1, index_size);
    __ cmpl(temp, (int) bytecode()); // have we resolved this bytecode?
    __ jcc(Assembler::equal, resolved);
  }

  // resolve first time through
  address entry;
  switch (bytecode()) {
  case Bytecodes::_getstatic:
  case Bytecodes::_putstatic:
  case Bytecodes::_getfield:
  case Bytecodes::_putfield:
    entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_get_put);
    break;
  case Bytecodes::_invokevirtual:
  case Bytecodes::_invokespecial:
  case Bytecodes::_invokestatic:
  case Bytecodes::_invokeinterface:
    entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_invoke);
    break;
  case Bytecodes::_invokehandle:
    entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_invokehandle);
    break;
  case Bytecodes::_invokedynamic:
    entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_invokedynamic);
    break;
  case Bytecodes::_fast_aldc:
  case Bytecodes::_fast_aldc_w:
    entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_ldc);
    break;
  default:
    fatal(err_msg("unexpected bytecode: %s", Bytecodes::name(bytecode())));
    break;
  }
  __ movl(temp, (int) bytecode());
  __ call_VM(noreg, entry, temp);

  // Update registers with resolved info
  __ get_cache_and_index_at_bcp(Rcache, index, 1, index_size);
  if (result != noreg)
    __ movptr(result, Address(Rcache, index, Address::times_ptr, constantPoolCacheOopDesc::base_offset() + ConstantPoolCacheEntry::f1_offset()));
  __ bind(resolved);
}

// The cache and index registers must be set before the call
void TemplateTable::load_field_cp_cache_entry(Register obj,
                                              Register cache,
                                              Register index,
                                              Register off,
                                              Register flags,
                                              bool is_static = false) {
  assert_different_registers(cache, index, flags, off);

  ByteSize cp_base_offset =
    constantPoolCacheOopDesc::base_offset();
  // Field offset
  __ movptr(off, Address(cache, index, Address::times_ptr,
                         in_bytes(cp_base_offset +
                                  ConstantPoolCacheEntry::f2_offset())));
  // Flags
  __ movl(flags, Address(cache, index, Address::times_ptr,
                         in_bytes(cp_base_offset +
                                  ConstantPoolCacheEntry::flags_offset())));

  // klass overwrite register
  if (is_static) {
    __ movptr(obj, Address(cache, index, Address::times_ptr,
                           in_bytes(cp_base_offset +
                                    ConstantPoolCacheEntry::f1_offset())));
  }
}

void TemplateTable::load_invoke_cp_cache_entry(int byte_no,
                                               Register method,
                                               Register itable_index,
                                               Register flags,
                                               bool is_invokevirtual,
                                               bool is_invokevfinal, /*unused*/
                                               bool is_invokedynamic) {
  // setup registers
  const Register cache = rcx;
  const Register index = rdx;
  assert_different_registers(method, flags);
  assert_different_registers(method, cache, index);
  assert_different_registers(itable_index, flags);
  assert_different_registers(itable_index, cache, index);
  // determine constant pool cache field offsets
  assert(is_invokevirtual == (byte_no == f2_byte), "is_invokevirtual flag redundant");
  const int method_offset = in_bytes(
    constantPoolCacheOopDesc::base_offset() +
      ((byte_no == f2_byte)
       ? ConstantPoolCacheEntry::f2_offset()
       : ConstantPoolCacheEntry::f1_offset()));
  const int flags_offset = in_bytes(constantPoolCacheOopDesc::base_offset() +
                                    ConstantPoolCacheEntry::flags_offset());
  // access constant pool cache fields
  const int index_offset = in_bytes(constantPoolCacheOopDesc::base_offset() +
                                    ConstantPoolCacheEntry::f2_offset());

  if (byte_no == f12_oop) {
    // Resolved f1_oop (CallSite, MethodType, etc.) goes into 'itable_index'.
    // Resolved f2_oop (methodOop invoker) will go into 'method' (at index_offset).
    // See ConstantPoolCacheEntry::set_dynamic_call and set_method_handle.
    size_t index_size = (is_invokedynamic ? sizeof(u4) : sizeof(u2));
    resolve_cache_and_index(byte_no, itable_index, cache, index, index_size);
    __ movptr(method, Address(cache, index, Address::times_ptr, index_offset));
    itable_index = noreg; // hack to disable load below
  } else {
    resolve_cache_and_index(byte_no, noreg, cache, index, sizeof(u2));
    __ movptr(method, Address(cache, index, Address::times_ptr, method_offset));
  }
  if (itable_index != noreg) {
    // pick up itable index from f2 also:
    assert(byte_no == f1_byte, "already picked up f1");
    __ movptr(itable_index, Address(cache, index, Address::times_ptr, index_offset));
  }
  __ movl(flags, Address(cache, index, Address::times_ptr, flags_offset));
}


// The cache and index registers are expected to be set before the call.
// Correct values of the cache and index registers are preserved.
void TemplateTable::jvmti_post_field_access(Register cache, Register index,
                                            bool is_static, bool has_tos) {
  // do the JVMTI work here to avoid disturbing the register state below
  // We use c_rarg registers here because we want to use the register used in
  // the call to the VM
  if (JvmtiExport::can_post_field_access()) {
    // Check to see if a field access watch has been set before we
    // take the time to call into the VM.
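    // (JvmtiExport::get_field_access_count_addr() points at a global
    // count of installed field-access watches; if it reads zero, no
    // watch can match and the runtime call is skipped entirely.)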
    Label L1;
    assert_different_registers(cache, index, rax);
    __ mov32(rax, ExternalAddress((address) JvmtiExport::get_field_access_count_addr()));
    __ testl(rax, rax);
    __ jcc(Assembler::zero, L1);

    __ get_cache_and_index_at_bcp(c_rarg2, c_rarg3, 1);

    // cache entry pointer
    __ addptr(c_rarg2, in_bytes(constantPoolCacheOopDesc::base_offset()));
    __ shll(c_rarg3, LogBytesPerWord);
    __ addptr(c_rarg2, c_rarg3);
    if (is_static) {
      __ xorl(c_rarg1, c_rarg1); // NULL object reference
    } else {
      __ movptr(c_rarg1, at_tos()); // get object pointer without popping it
      __ verify_oop(c_rarg1);
    }
    // c_rarg1: object pointer or NULL
    // c_rarg2: cache entry pointer
    // c_rarg3: jvalue object on the stack
    __ call_VM(noreg, CAST_FROM_FN_PTR(address,
                                       InterpreterRuntime::post_field_access),
               c_rarg1, c_rarg2, c_rarg3);
    __ get_cache_and_index_at_bcp(cache, index, 1);
    __ bind(L1);
  }
}

void TemplateTable::pop_and_check_object(Register r) {
  __ pop_ptr(r);
  __ null_check(r); // for field access must check obj.
  __ verify_oop(r);
}

void TemplateTable::getfield_or_static(int byte_no, bool is_static) {
  transition(vtos, vtos);

  const Register cache = rcx;
  const Register index = rdx;
  const Register obj   = c_rarg3;
  const Register off   = rbx;
  const Register flags = rax;
  const Register bc    = c_rarg3; // uses same reg as obj, so don't mix them

  resolve_cache_and_index(byte_no, noreg, cache, index, sizeof(u2));
  jvmti_post_field_access(cache, index, is_static, false);
  load_field_cp_cache_entry(obj, cache, index, off, flags, is_static);

  if (!is_static) {
    // obj is on the stack
    pop_and_check_object(obj);
  }

  const Address field(obj, off, Address::times_1);

  Label Done, notByte, notInt, notShort, notChar,
        notLong, notFloat, notObj, notDouble;

  __ shrl(flags, ConstantPoolCacheEntry::tos_state_shift);
  // Make sure we don't need to mask edx after the above shift
  assert(btos == 0, "change code, btos != 0");

  __ andl(flags, ConstantPoolCacheEntry::tos_state_mask);
  __ jcc(Assembler::notZero, notByte);
  // btos
  __ load_signed_byte(rax, field);
  __ push(btos);
  // Rewrite bytecode to be faster
  if (!is_static) {
    patch_bytecode(Bytecodes::_fast_bgetfield, bc, rbx);
  }
  __ jmp(Done);

  __ bind(notByte);
  __ cmpl(flags, atos);
  __ jcc(Assembler::notEqual, notObj);
  // atos
  __ load_heap_oop(rax, field);
  __ push(atos);
  if (!is_static) {
    patch_bytecode(Bytecodes::_fast_agetfield, bc, rbx);
  }
  __ jmp(Done);

  __ bind(notObj);
  __ cmpl(flags, itos);
  __ jcc(Assembler::notEqual, notInt);
  // itos
  __ movl(rax, field);
  __ push(itos);
  // Rewrite bytecode to be faster
  if (!is_static) {
    patch_bytecode(Bytecodes::_fast_igetfield, bc, rbx);
  }
  __ jmp(Done);

  __ bind(notInt);
  __ cmpl(flags, ctos);
  __ jcc(Assembler::notEqual, notChar);
  // ctos
  __ load_unsigned_short(rax, field);
  __ push(ctos);
  // Rewrite bytecode to be faster
  if (!is_static) {
    patch_bytecode(Bytecodes::_fast_cgetfield, bc, rbx);
  }
  __ jmp(Done);

  __ bind(notChar);
  __ cmpl(flags, stos);
  __ jcc(Assembler::notEqual, notShort);
  // stos
  __ load_signed_short(rax, field);
  __ push(stos);
  // Rewrite bytecode to be faster
  if (!is_static) {
    patch_bytecode(Bytecodes::_fast_sgetfield, bc, rbx);
  }
  __ jmp(Done);

  __ bind(notShort);
  __ cmpl(flags, ltos);
  __ jcc(Assembler::notEqual, notLong);
  // ltos
  __ movq(rax, field);
  __ push(ltos);
  // Rewrite bytecode to be faster
  if (!is_static) {
    patch_bytecode(Bytecodes::_fast_lgetfield, bc, rbx);
  }
  __ jmp(Done);

  __ bind(notLong);
  __ cmpl(flags, ftos);
  __ jcc(Assembler::notEqual, notFloat);
  // ftos
  __ movflt(xmm0, field);
  __ push(ftos);
  // Rewrite bytecode to be faster
  if (!is_static) {
    patch_bytecode(Bytecodes::_fast_fgetfield, bc, rbx);
  }
  __ jmp(Done);

  __ bind(notFloat);
#ifdef ASSERT
  __ cmpl(flags, dtos);
  __ jcc(Assembler::notEqual, notDouble);
#endif
  // dtos
  __ movdbl(xmm0, field);
  __ push(dtos);
  // Rewrite bytecode to be faster
  if (!is_static) {
    patch_bytecode(Bytecodes::_fast_dgetfield, bc, rbx);
  }
#ifdef ASSERT
  __ jmp(Done);

  __ bind(notDouble);
  __ stop("Bad state");
#endif

  __ bind(Done);
  // [jk] not needed currently
  // volatile_barrier(Assembler::Membar_mask_bits(Assembler::LoadLoad |
  //                                              Assembler::LoadStore));
}


void TemplateTable::getfield(int byte_no) {
  getfield_or_static(byte_no, false);
}

void TemplateTable::getstatic(int byte_no) {
  getfield_or_static(byte_no, true);
}

// The cache and index registers are expected to be set before the call.
// The function may destroy various registers, just not the cache and
// index registers.
void TemplateTable::jvmti_post_field_mod(Register cache, Register index, bool is_static) {
  transition(vtos, vtos);

  ByteSize cp_base_offset = constantPoolCacheOopDesc::base_offset();

  if (JvmtiExport::can_post_field_modification()) {
    // Check to see if a field modification watch has been set before
    // we take the time to call into the VM.
    Label L1;
    assert_different_registers(cache, index, rax);
    __ mov32(rax, ExternalAddress((address)JvmtiExport::get_field_modification_count_addr()));
    __ testl(rax, rax);
    __ jcc(Assembler::zero, L1);

    __ get_cache_and_index_at_bcp(c_rarg2, rscratch1, 1);

    if (is_static) {
      // Life is simple. Null out the object pointer.
      __ xorl(c_rarg1, c_rarg1);
    } else {
      // Life is harder. The stack holds the value on top, followed by
      // the object. We don't know the size of the value, though; it
      // could be one or two words depending on its type. As a result,
      // we must find the type to determine where the object is.
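      // (Stack layout here: the value occupies one slot, or two for
      // ltos/dtos, with the object reference in the slot below it.)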
      __ movl(c_rarg3, Address(c_rarg2, rscratch1,
                               Address::times_8,
                               in_bytes(cp_base_offset +
                                        ConstantPoolCacheEntry::flags_offset())));
      __ shrl(c_rarg3, ConstantPoolCacheEntry::tos_state_shift);
      // Make sure we don't need to mask rcx after the above shift
      ConstantPoolCacheEntry::verify_tos_state_shift();
      __ movptr(c_rarg1, at_tos_p1()); // initially assume a one word jvalue
      __ cmpl(c_rarg3, ltos);
      __ cmovptr(Assembler::equal,
                 c_rarg1, at_tos_p2()); // ltos (two word jvalue)
      __ cmpl(c_rarg3, dtos);
      __ cmovptr(Assembler::equal,
                 c_rarg1, at_tos_p2()); // dtos (two word jvalue)
    }
    // cache entry pointer
    __ addptr(c_rarg2, in_bytes(cp_base_offset));
    __ shll(rscratch1, LogBytesPerWord);
    __ addptr(c_rarg2, rscratch1);
    // object (tos)
    __ mov(c_rarg3, rsp);
    // c_rarg1: object pointer set up above (NULL if static)
    // c_rarg2: cache entry pointer
    // c_rarg3: jvalue object on the stack
    __ call_VM(noreg,
               CAST_FROM_FN_PTR(address,
                                InterpreterRuntime::post_field_modification),
               c_rarg1, c_rarg2, c_rarg3);
    __ get_cache_and_index_at_bcp(cache, index, 1);
    __ bind(L1);
  }
}

void TemplateTable::putfield_or_static(int byte_no, bool is_static) {
  transition(vtos, vtos);

  const Register cache = rcx;
  const Register index = rdx;
  const Register obj   = rcx;
  const Register off   = rbx;
  const Register flags = rax;
  const Register bc    = c_rarg3;

  resolve_cache_and_index(byte_no, noreg, cache, index, sizeof(u2));
  jvmti_post_field_mod(cache, index, is_static);
  load_field_cp_cache_entry(obj, cache, index, off, flags, is_static);

  // [jk] not needed currently
  // volatile_barrier(Assembler::Membar_mask_bits(Assembler::LoadStore |
  //                                              Assembler::StoreStore));

  Label notVolatile, Done;
  __ movl(rdx, flags);
  __ shrl(rdx, ConstantPoolCacheEntry::is_volatile_shift);
  __ andl(rdx, 0x1);

  // field address
  const Address field(obj, off, Address::times_1);

  Label notByte, notInt, notShort, notChar,
        notLong, notFloat, notObj, notDouble;

  __ shrl(flags, ConstantPoolCacheEntry::tos_state_shift);

  assert(btos == 0, "change code, btos != 0");
  __ andl(flags, ConstantPoolCacheEntry::tos_state_mask);
  __ jcc(Assembler::notZero, notByte);

  // btos
  {
    __ pop(btos);
    if (!is_static) pop_and_check_object(obj);
    __ movb(field, rax);
    if (!is_static) {
      patch_bytecode(Bytecodes::_fast_bputfield, bc, rbx, true, byte_no);
    }
    __ jmp(Done);
  }

  __ bind(notByte);
  __ cmpl(flags, atos);
  __ jcc(Assembler::notEqual, notObj);

  // atos
  {
    __ pop(atos);
    if (!is_static) pop_and_check_object(obj);
    // Store into the field
    do_oop_store(_masm, field, rax, _bs->kind(), false);
    if (!is_static) {
      patch_bytecode(Bytecodes::_fast_aputfield, bc, rbx, true, byte_no);
    }
    __ jmp(Done);
  }

  __ bind(notObj);
  __ cmpl(flags, itos);
  __ jcc(Assembler::notEqual, notInt);

  // itos
  {
    __ pop(itos);
    if (!is_static) pop_and_check_object(obj);
    __ movl(field, rax);
    if (!is_static) {
      patch_bytecode(Bytecodes::_fast_iputfield, bc, rbx, true, byte_no);
    }
    __ jmp(Done);
  }

  __ bind(notInt);
  __ cmpl(flags, ctos);
  __ jcc(Assembler::notEqual, notChar);

  // ctos
  {
    __ pop(ctos);
    if (!is_static) pop_and_check_object(obj);
    __ movw(field, rax);
    if (!is_static) {
      patch_bytecode(Bytecodes::_fast_cputfield, bc, rbx, true, byte_no);
    }
    __ jmp(Done);
  }

  __ bind(notChar);
  __ cmpl(flags, stos);
  __ jcc(Assembler::notEqual, notShort);

  // stos
  {
    __ pop(stos);
    if (!is_static) pop_and_check_object(obj);
    __ movw(field, rax);
    if (!is_static) {
      patch_bytecode(Bytecodes::_fast_sputfield, bc, rbx, true, byte_no);
    }
    __ jmp(Done);
  }

  __ bind(notShort);
  __ cmpl(flags, ltos);
  __ jcc(Assembler::notEqual, notLong);

  // ltos
  {
    __ pop(ltos);
    if (!is_static) pop_and_check_object(obj);
    __ movq(field, rax);
    if (!is_static) {
      patch_bytecode(Bytecodes::_fast_lputfield, bc, rbx, true, byte_no);
    }
    __ jmp(Done);
  }

  __ bind(notLong);
  __ cmpl(flags, ftos);
  __ jcc(Assembler::notEqual, notFloat);

  // ftos
  {
    __ pop(ftos);
    if (!is_static) pop_and_check_object(obj);
    __ movflt(field, xmm0);
    if (!is_static) {
      patch_bytecode(Bytecodes::_fast_fputfield, bc, rbx, true, byte_no);
    }
    __ jmp(Done);
  }

  __ bind(notFloat);
#ifdef ASSERT
  __ cmpl(flags, dtos);
  __ jcc(Assembler::notEqual, notDouble);
#endif

  // dtos
  {
    __ pop(dtos);
    if (!is_static) pop_and_check_object(obj);
    __ movdbl(field, xmm0);
    if (!is_static) {
      patch_bytecode(Bytecodes::_fast_dputfield, bc, rbx, true, byte_no);
    }
  }

#ifdef ASSERT
  __ jmp(Done);

  __ bind(notDouble);
  __ stop("Bad state");
#endif

  __ bind(Done);

  // Check for volatile store
  __ testl(rdx, rdx);
  __ jcc(Assembler::zero, notVolatile);
  volatile_barrier(Assembler::Membar_mask_bits(Assembler::StoreLoad |
                                               Assembler::StoreStore));
  __ bind(notVolatile);
}

void TemplateTable::putfield(int byte_no) {
  putfield_or_static(byte_no, false);
}

void TemplateTable::putstatic(int byte_no) {
  putfield_or_static(byte_no, true);
}

void TemplateTable::jvmti_post_fast_field_mod() {
  if (JvmtiExport::can_post_field_modification()) {
    // Check to see if a field modification watch has been set before
    // we take the time to call into the VM.
    Label L2;
    __ mov32(c_rarg3, ExternalAddress((address)JvmtiExport::get_field_modification_count_addr()));
    __ testl(c_rarg3, c_rarg3);
    __ jcc(Assembler::zero, L2);
    __ pop_ptr(rbx);  // copy the object pointer from tos
    __ verify_oop(rbx);
    __ push_ptr(rbx); // put the object pointer back on tos
    // Save tos values before call_VM() clobbers them. Since we have
    // to do it for every data type, we use the saved values as the
    // jvalue object.
    switch (bytecode()) {             // load values into the jvalue object
    case Bytecodes::_fast_aputfield: __ push_ptr(rax); break;
    case Bytecodes::_fast_bputfield: // fall through
    case Bytecodes::_fast_sputfield: // fall through
    case Bytecodes::_fast_cputfield: // fall through
    case Bytecodes::_fast_iputfield: __ push_i(rax); break;
    case Bytecodes::_fast_dputfield: __ push_d(); break;
    case Bytecodes::_fast_fputfield: __ push_f(); break;
    case Bytecodes::_fast_lputfield: __ push_l(rax); break;

    default:
      ShouldNotReachHere();
    }
    __ mov(c_rarg3, rsp);             // points to jvalue on the stack
    // access constant pool cache entry
    __ get_cache_entry_pointer_at_bcp(c_rarg2, rax, 1);
    __ verify_oop(rbx);
    // rbx: object pointer copied above
    // c_rarg2: cache entry pointer
    // c_rarg3: jvalue object on the stack
    __ call_VM(noreg,
               CAST_FROM_FN_PTR(address,
                                InterpreterRuntime::post_field_modification),
               rbx, c_rarg2, c_rarg3);

    switch (bytecode()) {             // restore tos values
    case Bytecodes::_fast_aputfield: __ pop_ptr(rax); break;
    case Bytecodes::_fast_bputfield: // fall through
    case Bytecodes::_fast_sputfield: // fall through
    case Bytecodes::_fast_cputfield: // fall through
    case Bytecodes::_fast_iputfield: __ pop_i(rax); break;
    case Bytecodes::_fast_dputfield: __ pop_d(); break;
    case Bytecodes::_fast_fputfield: __ pop_f(); break;
    case Bytecodes::_fast_lputfield: __ pop_l(rax); break;
    }
    __ bind(L2);
  }
}

void TemplateTable::fast_storefield(TosState state) {
  transition(state, vtos);

  ByteSize base = constantPoolCacheOopDesc::base_offset();

  jvmti_post_fast_field_mod();

  // access constant pool cache
  __ get_cache_and_index_at_bcp(rcx, rbx, 1);

  // test for volatile with rdx
  __ movl(rdx, Address(rcx, rbx, Address::times_8,
                       in_bytes(base +
                                ConstantPoolCacheEntry::flags_offset())));

  // replace index with field offset from cache entry
  __ movptr(rbx, Address(rcx, rbx, Address::times_8,
                         in_bytes(base + ConstantPoolCacheEntry::f2_offset())));

  // [jk] not needed currently
  // volatile_barrier(Assembler::Membar_mask_bits(Assembler::LoadStore |
  //                                              Assembler::StoreStore));

  Label notVolatile;
  __ shrl(rdx, ConstantPoolCacheEntry::is_volatile_shift);
  __ andl(rdx, 0x1);

  // Get object from stack
  pop_and_check_object(rcx);

  // field address
  const Address field(rcx, rbx, Address::times_1);

  // access field
  switch (bytecode()) {
  case Bytecodes::_fast_aputfield:
    do_oop_store(_masm, field, rax, _bs->kind(), false);
    break;
  case Bytecodes::_fast_lputfield:
    __ movq(field, rax);
    break;
  case Bytecodes::_fast_iputfield:
    __ movl(field, rax);
    break;
  case Bytecodes::_fast_bputfield:
    __ movb(field, rax);
    break;
  case Bytecodes::_fast_sputfield:
    // fall through
  case Bytecodes::_fast_cputfield:
    __ movw(field, rax);
    break;
  case Bytecodes::_fast_fputfield:
    __ movflt(field, xmm0);
    break;
  case Bytecodes::_fast_dputfield:
    __ movdbl(field, xmm0);
    break;
  default:
    ShouldNotReachHere();
  }

  // Check for volatile store
  __ testl(rdx, rdx);
  __ jcc(Assembler::zero, notVolatile);
  volatile_barrier(Assembler::Membar_mask_bits(Assembler::StoreLoad |
                                               Assembler::StoreStore));
  __ bind(notVolatile);
}


void TemplateTable::fast_accessfield(TosState state) {
  transition(atos, state);

  // Do the JVMTI work here to avoid disturbing the register state below
  if (JvmtiExport::can_post_field_access()) {
    // Check to see if a field access watch has been set before we
    // take the time to call into the VM.
    Label L1;
    __ mov32(rcx, ExternalAddress((address) JvmtiExport::get_field_access_count_addr()));
    __ testl(rcx, rcx);
    __ jcc(Assembler::zero, L1);
    // access constant pool cache entry
    __ get_cache_entry_pointer_at_bcp(c_rarg2, rcx, 1);
    __ verify_oop(rax);
    __ push_ptr(rax); // save object pointer before call_VM() clobbers it
    __ mov(c_rarg1, rax);
    // c_rarg1: object pointer copied above
    // c_rarg2: cache entry pointer
    __ call_VM(noreg,
               CAST_FROM_FN_PTR(address,
                                InterpreterRuntime::post_field_access),
               c_rarg1, c_rarg2);
    __ pop_ptr(rax); // restore object pointer
    __ bind(L1);
  }

  // access constant pool cache
  __ get_cache_and_index_at_bcp(rcx, rbx, 1);
  // replace index with field offset from cache entry
  // [jk] not needed currently
  // if (os::is_MP()) {
  //   __ movl(rdx, Address(rcx, rbx, Address::times_8,
  //                        in_bytes(constantPoolCacheOopDesc::base_offset() +
  //                                 ConstantPoolCacheEntry::flags_offset())));
  //   __ shrl(rdx, ConstantPoolCacheEntry::is_volatile_shift);
  //   __ andl(rdx, 0x1);
  // }
  __ movptr(rbx, Address(rcx, rbx, Address::times_8,
                         in_bytes(constantPoolCacheOopDesc::base_offset() +
                                  ConstantPoolCacheEntry::f2_offset())));

  // rax: object
  __ verify_oop(rax);
  __ null_check(rax);
  Address field(rax, rbx, Address::times_1);

  // access field
  switch (bytecode()) {
  case Bytecodes::_fast_agetfield:
    __ load_heap_oop(rax, field);
    __ verify_oop(rax);
    break;
  case Bytecodes::_fast_lgetfield:
    __ movq(rax, field);
    break;
  case Bytecodes::_fast_igetfield:
    __ movl(rax, field);
    break;
  case Bytecodes::_fast_bgetfield:
    __ movsbl(rax, field);
    break;
  case Bytecodes::_fast_sgetfield:
    __ load_signed_short(rax, field);
    break;
  case Bytecodes::_fast_cgetfield:
    __ load_unsigned_short(rax, field);
    break;
  case Bytecodes::_fast_fgetfield:
    __ movflt(xmm0, field);
    break;
  case Bytecodes::_fast_dgetfield:
    __ movdbl(xmm0, field);
    break;
  default:
    ShouldNotReachHere();
  }
  // [jk] not needed currently
  // if (os::is_MP()) {
  //   Label notVolatile;
  //   __ testl(rdx, rdx);
  //   __ jcc(Assembler::zero, notVolatile);
  //   __ membar(Assembler::LoadLoad);
  //   __ bind(notVolatile);
  // }
}

void TemplateTable::fast_xaccess(TosState state) {
  transition(vtos, state);

  // get receiver
  __ movptr(rax, aaddress(0));
  // access constant pool cache
  __ get_cache_and_index_at_bcp(rcx, rdx, 2);
  __ movptr(rbx,
            Address(rcx, rdx, Address::times_8,
                    in_bytes(constantPoolCacheOopDesc::base_offset() +
                             ConstantPoolCacheEntry::f2_offset())));
  // make sure exception is reported in correct bcp range (getfield is
  // next instruction)
  __ increment(r13);
  __ null_check(rax);
  switch (state) {
  case itos:
    __ movl(rax, Address(rax, rbx, Address::times_1));
    break;
  case atos:
    __ load_heap_oop(rax, Address(rax, rbx, Address::times_1));
    __ verify_oop(rax);
    break;
  case ftos:
    __ movflt(xmm0, Address(rax, rbx, Address::times_1));
    break;
  default:
    ShouldNotReachHere();
  }

  // [jk] not needed currently
  // if (os::is_MP()) {
  //   Label notVolatile;
  //   __ movl(rdx, Address(rcx, rdx, Address::times_8,
  //                        in_bytes(constantPoolCacheOopDesc::base_offset() +
  //                                 ConstantPoolCacheEntry::flags_offset())));
  //   __ shrl(rdx, ConstantPoolCacheEntry::is_volatile_shift);
  //   __ testl(rdx, 0x1);
  //   __ jcc(Assembler::zero, notVolatile);
  //   __ membar(Assembler::LoadLoad);
  //   __ bind(notVolatile);
  // }

  __ decrement(r13);
}



//-----------------------------------------------------------------------------
// Calls

void TemplateTable::count_calls(Register method, Register temp) {
  // implemented elsewhere
  ShouldNotReachHere();
}

void TemplateTable::prepare_invoke(int byte_no,
                                   Register method,  // linked method (or i-klass)
                                   Register index,   // itable index, MethodType, etc.
                                   Register recv,    // if caller wants to see it
                                   Register flags    // if caller wants to test it
                                   ) {
  // determine flags
  const Bytecodes::Code code = bytecode();
  const bool is_invokeinterface  = code == Bytecodes::_invokeinterface;
  const bool is_invokedynamic    = code == Bytecodes::_invokedynamic;
  const bool is_invokehandle     = code == Bytecodes::_invokehandle;
  const bool is_invokevirtual    = code == Bytecodes::_invokevirtual;
  const bool is_invokespecial    = code == Bytecodes::_invokespecial;
  const bool load_receiver       = (recv  != noreg);
  const bool save_flags          = (flags != noreg);
  assert(load_receiver == (code != Bytecodes::_invokestatic && code != Bytecodes::_invokedynamic), "");
  assert(save_flags    == (is_invokeinterface || is_invokevirtual), "need flags for vfinal");
  assert(flags == noreg || flags == rdx, "");
  assert(recv  == noreg || recv  == rcx, "");

  // setup registers & access constant pool cache
  if (recv  == noreg)  recv  = rcx;
  if (flags == noreg)  flags = rdx;
  assert_different_registers(method, index, recv, flags);

  // save 'interpreter return address'
  __ save_bcp();

  load_invoke_cp_cache_entry(byte_no, method, index, flags, is_invokevirtual, false, is_invokedynamic);

  // maybe push appendix to arguments (just before return address)
  if (is_invokedynamic || is_invokehandle) {
    Label L_no_push;
    __ verify_oop(index);
    __ testl(flags, (1 << ConstantPoolCacheEntry::has_appendix_shift));
    __ jccb(Assembler::zero, L_no_push);
    // Push the appendix as a trailing parameter.
    // This must be done before we get the receiver,
    // since the parameter_size includes it.
    __ push(index);  // push appendix (MethodType, CallSite, etc.)
    __ bind(L_no_push);
  }

  // load receiver if needed (after appendix is pushed so parameter size is correct)
  // Note: no return address pushed yet
  if (load_receiver) {
    __ movl(recv, flags);
    __ andl(recv, ConstantPoolCacheEntry::parameter_size_mask);
    const int no_return_pc_pushed_yet = -1;  // argument slot correction before we push return address
    const int receiver_is_at_end      = -1;  // back off one slot to get receiver
    Address recv_addr = __ argument_address(recv, no_return_pc_pushed_yet + receiver_is_at_end);
    __ movptr(recv, recv_addr);
    __ verify_oop(recv);
  }

  if (save_flags) {
    __ movl(r13, flags);
  }

  // compute return type
  __ shrl(flags, ConstantPoolCacheEntry::tos_state_shift);
  // Make sure we don't need to mask flags after the above shift
  ConstantPoolCacheEntry::verify_tos_state_shift();
  // load return address
  {
    const address table_addr = (is_invokeinterface || is_invokedynamic) ?
        (address)Interpreter::return_5_addrs_by_index_table() :
        (address)Interpreter::return_3_addrs_by_index_table();
    ExternalAddress table(table_addr);
    __ lea(rscratch1, table);
    __ movptr(flags, Address(rscratch1, flags, Address::times_ptr));
  }

  // push return address
  __ push(flags);

  // Restore flags value from the constant pool cache, and restore r13
  // (the bytecode pointer) for later null checks.
  if (save_flags) {
    __ movl(flags, r13);
    __ restore_bcp();
  }
}


void TemplateTable::invokevirtual_helper(Register index,
                                         Register recv,
                                         Register flags) {
  // Uses temporary registers rax, rdx
  assert_different_registers(index, recv, rax, rdx);
  assert(index == rbx, "");
  assert(recv  == rcx, "");

  // Test for an invoke of a final method
  Label notFinal;
  __ movl(rax, flags);
  __ andl(rax, (1 << ConstantPoolCacheEntry::is_vfinal_shift));
  __ jcc(Assembler::zero, notFinal);

  const Register method = index;  // method must be rbx
  assert(method == rbx,
         "methodOop must be rbx for interpreter calling convention");

  // do the call - the index is actually the method to call
  // that is, f2 is a vtable index if !is_vfinal, else f2 is a methodOop
  __ verify_oop(method);

  // It's final, need a null check here!
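  // (A final method is dispatched directly rather than through the
  // vtable, so the implicit null check done by loading the receiver's
  // klass on the non-final path below never happens and must be done
  // explicitly.)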
  __ null_check(recv);

  // profile this call
  __ profile_final_call(rax);

  __ jump_from_interpreted(method, rax);

  __ bind(notFinal);

  // get receiver klass
  __ null_check(recv, oopDesc::klass_offset_in_bytes());
  __ load_klass(rax, recv);
  __ verify_oop(rax);

  // profile this call
  __ profile_virtual_call(rax, r14, rdx);

  // get target methodOop & entry point
  __ lookup_virtual_method(rax, index, method);
  __ jump_from_interpreted(method, rdx);
}


void TemplateTable::invokevirtual(int byte_no) {
  transition(vtos, vtos);
  assert(byte_no == f2_byte, "use this argument");
  prepare_invoke(byte_no,
                 rbx,        // method or vtable index
                 noreg,      // unused itable index
                 rcx, rdx);  // recv, flags

  // rbx: index
  // rcx: receiver
  // rdx: flags

  invokevirtual_helper(rbx, rcx, rdx);
}


void TemplateTable::invokespecial(int byte_no) {
  transition(vtos, vtos);
  assert(byte_no == f1_byte, "use this argument");
  prepare_invoke(byte_no, rbx, noreg,  // get f1 methodOop
                 rcx);                 // get receiver also for null check
  __ verify_oop(rcx);
  __ null_check(rcx);
  // do the call
  __ verify_oop(rbx);
  __ profile_call(rax);
  __ jump_from_interpreted(rbx, rax);
}


void TemplateTable::invokestatic(int byte_no) {
  transition(vtos, vtos);
  assert(byte_no == f1_byte, "use this argument");
  prepare_invoke(byte_no, rbx);  // get f1 methodOop
  // do the call
  __ verify_oop(rbx);
  __ profile_call(rax);
  __ jump_from_interpreted(rbx, rax);
}

void TemplateTable::fast_invokevfinal(int byte_no) {
  transition(vtos, vtos);
  assert(byte_no == f2_byte, "use this argument");
  __ stop("fast_invokevfinal not used on amd64");
}

void TemplateTable::invokeinterface(int byte_no) {
  transition(vtos, vtos);
  assert(byte_no == f1_byte, "use this argument");
  prepare_invoke(byte_no, rax, rbx,  // get f1 klassOop, f2 itable index
                 rcx, rdx);          // recv, flags

  // rax: interface klass (from f1)
  // rbx: itable index (from f2)
  // rcx: receiver
  // rdx: flags

  // Special case of invokeinterface called for virtual method of
  // java.lang.Object. See cpCacheOop.cpp for details.
  // This code isn't produced by javac, but could be produced by
  // another compliant Java compiler.
  Label notMethod;
  __ movl(r14, rdx);
  __ andl(r14, (1 << ConstantPoolCacheEntry::is_forced_virtual_shift));
  __ jcc(Assembler::zero, notMethod);

  invokevirtual_helper(rbx, rcx, rdx);
  __ bind(notMethod);

  // Get receiver klass into rdx - also a null check
  __ restore_locals();  // restore r14
  __ null_check(rcx, oopDesc::klass_offset_in_bytes());
  __ load_klass(rdx, rcx);
  __ verify_oop(rdx);

  // profile this call
  __ profile_virtual_call(rdx, r13, r14);

  Label no_such_interface, no_such_method;

  __ lookup_interface_method(// inputs: rec. class, interface, itable index
                             rdx, rax, rbx,
                             // outputs: method, scan temp. reg
                             rbx, r13,
                             no_such_interface);

  // rbx: methodOop to call
  // rcx: receiver
  // Check for abstract method error
  // Note: This should be done more efficiently via a throw_abstract_method_error
  // interpreter entry point and a conditional jump to it in case of a null
  // method.
  __ testptr(rbx, rbx);
  __ jcc(Assembler::zero, no_such_method);

  // do the call
  // rcx: receiver
  // rbx: methodOop
  __ jump_from_interpreted(rbx, rdx);
  __ should_not_reach_here();

  // exception handling code follows...
  // note: must restore interpreter registers to canonical
  //       state for exception handling to work correctly!

  __ bind(no_such_method);
  // throw exception
  __ pop(rbx);          // pop return address (pushed by prepare_invoke)
  __ restore_bcp();     // r13 must be correct for exception handler (was destroyed)
  __ restore_locals();  // make sure locals pointer is correct as well (was destroyed)
  __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_AbstractMethodError));
  // the call_VM checks for exception, so we should never return here.
  __ should_not_reach_here();

  __ bind(no_such_interface);
  // throw exception
  __ pop(rbx);          // pop return address (pushed by prepare_invoke)
  __ restore_bcp();     // r13 must be correct for exception handler (was destroyed)
  __ restore_locals();  // make sure locals pointer is correct as well (was destroyed)
  __ call_VM(noreg, CAST_FROM_FN_PTR(address,
                                     InterpreterRuntime::throw_IncompatibleClassChangeError));
  // the call_VM checks for exception, so we should never return here.
  __ should_not_reach_here();
}


void TemplateTable::invokehandle(int byte_no) {
  transition(vtos, vtos);
  assert(byte_no == f12_oop, "use this argument");
  const Register rbx_method = rbx;  // f2
  const Register rax_mtype  = rax;  // f1
  const Register rcx_recv   = rcx;
  const Register rdx_flags  = rdx;

  if (!EnableInvokeDynamic) {
    // rewriter does not generate this bytecode
    __ should_not_reach_here();
    return;
  }

  prepare_invoke(byte_no,
                 rbx_method, rax_mtype,  // get f2 methodOop, f1 MethodType
                 rcx_recv);
  __ verify_oop(rbx_method);
  __ verify_oop(rcx_recv);
  __ null_check(rcx_recv);

  // Note: rax_mtype is already pushed (if necessary) by prepare_invoke

  // FIXME: profile the LambdaForm also
  __ profile_final_call(rax);

  __ jump_from_interpreted(rbx_method, rdx);
}


void TemplateTable::invokedynamic(int byte_no) {
  transition(vtos, vtos);
  assert(byte_no == f12_oop, "use this argument");

  if (!EnableInvokeDynamic) {
    // We should not encounter this bytecode if !EnableInvokeDynamic.
    // The verifier will stop it. However, if we get past the verifier,
    // this will stop the thread in a reasonable way, without crashing the JVM.
    __ call_VM(noreg, CAST_FROM_FN_PTR(address,
                                       InterpreterRuntime::throw_IncompatibleClassChangeError));
    // the call_VM checks for exception, so we should never return here.
    __ should_not_reach_here();
    return;
  }

  const Register rbx_method   = rbx;
  const Register rax_callsite = rax;

  prepare_invoke(byte_no, rbx_method, rax_callsite);

  // rax: CallSite object (from f1)
  // rbx: MH.linkToCallSite method (from f2)

  // Note: rax_callsite is already pushed by prepare_invoke

  // %%% should make a type profile for any invokedynamic that takes a ref argument
  // profile this call
  __ profile_call(r13);

  __ verify_oop(rax_callsite);

  __ jump_from_interpreted(rbx_method, rdx);
}


//-----------------------------------------------------------------------------
// Allocation

void TemplateTable::_new() {
  transition(vtos, atos);
  __ get_unsigned_2_byte_index_at_bcp(rdx, 1);
  Label slow_case;
  Label done;
  Label initialize_header;
  Label initialize_object;  // including clearing the fields
  Label allocate_shared;

  __ get_cpool_and_tags(rsi, rax);
  // Make sure the class we're about to instantiate has been resolved.
  // This is done before loading instanceKlass to be consistent with the
  // order in which the Constant Pool is updated (see
  // constantPoolOopDesc::klass_at_put)
  const int tags_offset = typeArrayOopDesc::header_size(T_BYTE) * wordSize;
  __ cmpb(Address(rax, rdx, Address::times_1, tags_offset),
          JVM_CONSTANT_Class);
  __ jcc(Assembler::notEqual, slow_case);

  // get instanceKlass
  __ movptr(rsi, Address(rsi, rdx,
                         Address::times_8, sizeof(constantPoolOopDesc)));

  // make sure klass is fully initialized
  __ cmpb(Address(rsi,
                  instanceKlass::init_state_offset()),
          instanceKlass::fully_initialized);
  __ jcc(Assembler::notEqual, slow_case);

  // get instance_size in instanceKlass (scaled to a count of bytes)
  __ movl(rdx,
          Address(rsi,
                  Klass::layout_helper_offset()));
  // test to see if it has a finalizer or is malformed in some way
  __ testl(rdx, Klass::_lh_instance_slow_path_bit);
  __ jcc(Assembler::notZero, slow_case);

  // Allocate the instance
  // 1) Try to allocate in the TLAB
  // 2) if that fails and the object is large, allocate in the shared Eden
  // 3) if the above fails (or is not applicable), go to a slow case
  //    (creates a new TLAB, etc.)

  const bool allow_shared_alloc =
    Universe::heap()->supports_inline_contig_alloc() && !CMSIncrementalMode;

  if (UseTLAB) {
    __ movptr(rax, Address(r15_thread, in_bytes(JavaThread::tlab_top_offset())));
    __ lea(rbx, Address(rax, rdx, Address::times_1));
    __ cmpptr(rbx, Address(r15_thread, in_bytes(JavaThread::tlab_end_offset())));
    __ jcc(Assembler::above, allow_shared_alloc ? allocate_shared : slow_case);
    __ movptr(Address(r15_thread, in_bytes(JavaThread::tlab_top_offset())), rbx);
    if (ZeroTLAB) {
      // the fields have been already cleared
      __ jmp(initialize_header);
    } else {
      // initialize both the header and fields
      __ jmp(initialize_object);
    }
  }

  // Allocation in the shared Eden, if allowed.
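  // (Shared-Eden allocation below bumps the global heap top with a CAS
  // loop, retrying while other threads race on the same top pointer.)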
  //
  // rdx: instance size in bytes
  if (allow_shared_alloc) {
    __ bind(allocate_shared);

    ExternalAddress top((address)Universe::heap()->top_addr());
    ExternalAddress end((address)Universe::heap()->end_addr());

    const Register RtopAddr = rscratch1;
    const Register RendAddr = rscratch2;

    __ lea(RtopAddr, top);
    __ lea(RendAddr, end);
    __ movptr(rax, Address(RtopAddr, 0));

    // For retries rax gets set by cmpxchgq
    Label retry;
    __ bind(retry);
    __ lea(rbx, Address(rax, rdx, Address::times_1));
    __ cmpptr(rbx, Address(RendAddr, 0));
    __ jcc(Assembler::above, slow_case);

    // Compare rax with the top addr, and if still equal, store the new
    // top addr in rbx at the address of the top addr pointer. Sets ZF if was
    // equal, and clears it otherwise. Use lock prefix for atomicity on MPs.
    //
    // rax: object begin
    // rbx: object end
    // rdx: instance size in bytes
    if (os::is_MP()) {
      __ lock();
    }
    __ cmpxchgptr(rbx, Address(RtopAddr, 0));

    // if someone beat us on the allocation, try again, otherwise continue
    __ jcc(Assembler::notEqual, retry);

    __ incr_allocated_bytes(r15_thread, rdx, 0);
  }

  if (UseTLAB || Universe::heap()->supports_inline_contig_alloc()) {
    // The object is initialized before the header. If the object size is
    // zero, go directly to the header initialization.
    __ bind(initialize_object);
    __ decrementl(rdx, sizeof(oopDesc));
    __ jcc(Assembler::zero, initialize_header);

    // Initialize object fields
    __ xorl(rcx, rcx);             // use zero reg to clear memory (shorter code)
    __ shrl(rdx, LogBytesPerLong); // divide by oopSize to simplify the loop
    {
      Label loop;
      __ bind(loop);
      __ movq(Address(rax, rdx, Address::times_8,
                      sizeof(oopDesc) - oopSize),
              rcx);
      __ decrementl(rdx);
      __ jcc(Assembler::notZero, loop);
    }

    // initialize object header only.
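    // (The header is the mark word followed by the klass reference;
    // with compressed oops the narrow klass leaves a gap that is
    // zeroed below.)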
    __ bind(initialize_header);
    if (UseBiasedLocking) {
      __ movptr(rscratch1, Address(rsi, Klass::prototype_header_offset()));
      __ movptr(Address(rax, oopDesc::mark_offset_in_bytes()), rscratch1);
    } else {
      __ movptr(Address(rax, oopDesc::mark_offset_in_bytes()),
                (intptr_t) markOopDesc::prototype()); // header (address 0x1)
    }
    __ xorl(rcx, rcx);             // use zero reg to clear memory (shorter code)
    __ store_klass_gap(rax, rcx);  // zero klass gap for compressed oops
    __ store_klass(rax, rsi);      // store klass last

    {
      SkipIfEqual skip(_masm, &DTraceAllocProbes, false);
      // Trigger dtrace event for fastpath
      __ push(atos); // save the return value
      __ call_VM_leaf(
           CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_object_alloc), rax);
      __ pop(atos);  // restore the return value
    }
    __ jmp(done);
  }


  // slow case
  __ bind(slow_case);
  __ get_constant_pool(c_rarg1);
  __ get_unsigned_2_byte_index_at_bcp(c_rarg2, 1);
  call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::_new), c_rarg1, c_rarg2);
  __ verify_oop(rax);

  // continue
  __ bind(done);
}

void TemplateTable::newarray() {
  transition(itos, atos);
  __ load_unsigned_byte(c_rarg1, at_bcp(1));
  __ movl(c_rarg2, rax);
  call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::newarray),
          c_rarg1, c_rarg2);
}

void TemplateTable::anewarray() {
  transition(itos, atos);
  __ get_unsigned_2_byte_index_at_bcp(c_rarg2, 1);
  __ get_constant_pool(c_rarg1);
  __ movl(c_rarg3, rax);
  call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::anewarray),
          c_rarg1, c_rarg2, c_rarg3);
}

void TemplateTable::arraylength() {
  transition(atos, itos);
  __ null_check(rax, arrayOopDesc::length_offset_in_bytes());
  __ movl(rax, Address(rax, arrayOopDesc::length_offset_in_bytes()));
}

void TemplateTable::checkcast() {
  transition(atos, atos);
  Label done, is_null, ok_is_subtype, quicked, resolved;
  __ testptr(rax, rax); // object is in rax
  __ jcc(Assembler::zero, is_null);

  // Get cpool & tags index
  __ get_cpool_and_tags(rcx, rdx); // rcx=cpool, rdx=tags array
  __ get_unsigned_2_byte_index_at_bcp(rbx, 1); // rbx=index
  // See if bytecode has already been quicked
  __ cmpb(Address(rdx, rbx,
                  Address::times_1,
                  typeArrayOopDesc::header_size(T_BYTE) * wordSize),
          JVM_CONSTANT_Class);
  __ jcc(Assembler::equal, quicked);
  __ push(atos); // save receiver for result, and for GC
  call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::quicken_io_cc));
  __ pop_ptr(rdx); // restore receiver
  __ jmpb(resolved);

  // Get superklass in rax and subklass in rbx
  __ bind(quicked);
  __ mov(rdx, rax); // Save object in rdx; rax needed for subtype check
  __ movptr(rax, Address(rcx, rbx,
                         Address::times_8, sizeof(constantPoolOopDesc)));

  __ bind(resolved);
  __ load_klass(rbx, rdx);

  // Generate subtype check. Blows rcx, rdi. Object in rdx.
  // Superklass in rax. Subklass in rbx.
  __ gen_subtype_check(rbx, ok_is_subtype);

  // Come here on failure
  __ push_ptr(rdx);
  // object is at TOS
  __ jump(ExternalAddress(Interpreter::_throw_ClassCastException_entry));

  // Come here on success
  __ bind(ok_is_subtype);
  __ mov(rax, rdx); // Restore object from rdx

  // Collect counts on whether this check-cast sees NULLs a lot or not.
  if (ProfileInterpreter) {
    __ jmp(done);
    __ bind(is_null);
    __ profile_null_seen(rcx);
  } else {
    __ bind(is_null); // same as 'done'
  }
  __ bind(done);
}

void TemplateTable::instanceof() {
  transition(atos, itos);
  Label done, is_null, ok_is_subtype, quicked, resolved;
  __ testptr(rax, rax);
  __ jcc(Assembler::zero, is_null);

  // Get cpool & tags index
  __ get_cpool_and_tags(rcx, rdx); // rcx=cpool, rdx=tags array
  __ get_unsigned_2_byte_index_at_bcp(rbx, 1); // rbx=index
  // See if bytecode has already been quicked
  __ cmpb(Address(rdx, rbx,
                  Address::times_1,
                  typeArrayOopDesc::header_size(T_BYTE) * wordSize),
          JVM_CONSTANT_Class);
  __ jcc(Assembler::equal, quicked);

  __ push(atos); // save receiver for result, and for GC
  call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::quicken_io_cc));
  __ pop_ptr(rdx); // restore receiver
  __ verify_oop(rdx);
  __ load_klass(rdx, rdx);
  __ jmpb(resolved);

  // Get superklass in rax and subklass in rdx
  __ bind(quicked);
  __ load_klass(rdx, rax);
  __ movptr(rax, Address(rcx, rbx,
                         Address::times_8, sizeof(constantPoolOopDesc)));

  __ bind(resolved);

  // Generate subtype check. Blows rcx, rdi
  // Superklass in rax. Subklass in rdx.
  __ gen_subtype_check(rdx, ok_is_subtype);

  // Come here on failure
  __ xorl(rax, rax);
  __ jmpb(done);
  // Come here on success
  __ bind(ok_is_subtype);
  __ movl(rax, 1);

  // Collect counts on whether this test sees NULLs a lot or not.
  if (ProfileInterpreter) {
    __ jmp(done);
    __ bind(is_null);
    __ profile_null_seen(rcx);
  } else {
    __ bind(is_null); // same as 'done'
  }
  __ bind(done);
  // rax = 0: obj == NULL or  obj is not an instanceof the specified klass
  // rax = 1: obj != NULL and obj is an     instanceof the specified klass
}

//-----------------------------------------------------------------------------
// Breakpoints
void TemplateTable::_breakpoint() {
  // Note: We get here even if we are single stepping.
  // jbug insists on setting breakpoints at every bytecode
  // even if we are in single step mode.
3580 3581 transition(vtos, vtos); 3582 3583 // get the unpatched byte code 3584 __ get_method(c_rarg1); 3585 __ call_VM(noreg, 3586 CAST_FROM_FN_PTR(address, 3587 InterpreterRuntime::get_original_bytecode_at), 3588 c_rarg1, r13); 3589 __ mov(rbx, rax); 3590 3591 // post the breakpoint event 3592 __ get_method(c_rarg1); 3593 __ call_VM(noreg, 3594 CAST_FROM_FN_PTR(address, InterpreterRuntime::_breakpoint), 3595 c_rarg1, r13); 3596 3597 // complete the execution of original bytecode 3598 __ dispatch_only_normal(vtos); 3599 } 3600 3601 //----------------------------------------------------------------------------- 3602 // Exceptions 3603 3604 void TemplateTable::athrow() { 3605 transition(atos, vtos); 3606 __ null_check(rax); 3607 __ jump(ExternalAddress(Interpreter::throw_exception_entry())); 3608 } 3609 3610 //----------------------------------------------------------------------------- 3611 // Synchronization 3612 // 3613 // Note: monitorenter & exit are symmetric routines; which is reflected 3614 // in the assembly code structure as well 3615 // 3616 // Stack layout: 3617 // 3618 // [expressions ] <--- rsp = expression stack top 3619 // .. 3620 // [expressions ] 3621 // [monitor entry] <--- monitor block top = expression stack bot 3622 // .. 3623 // [monitor entry] 3624 // [frame data ] <--- monitor block bot 3625 // ... 3626 // [saved rbp ] <--- rbp 3627 void TemplateTable::monitorenter() { 3628 transition(atos, vtos); 3629 3630 // check for NULL object 3631 __ null_check(rax); 3632 3633 const Address monitor_block_top( 3634 rbp, frame::interpreter_frame_monitor_block_top_offset * wordSize); 3635 const Address monitor_block_bot( 3636 rbp, frame::interpreter_frame_initial_sp_offset * wordSize); 3637 const int entry_size = frame::interpreter_frame_monitor_size() * wordSize; 3638 3639 Label allocated; 3640 3641 // initialize entry pointer 3642 __ xorl(c_rarg1, c_rarg1); // points to free slot or NULL 3643 3644 // find a free slot in the monitor block (result in c_rarg1) 3645 { 3646 Label entry, loop, exit; 3647 __ movptr(c_rarg3, monitor_block_top); // points to current entry, 3648 // starting with top-most entry 3649 __ lea(c_rarg2, monitor_block_bot); // points to word before bottom 3650 // of monitor block 3651 __ jmpb(entry); 3652 3653 __ bind(loop); 3654 // check if current entry is used 3655 __ cmpptr(Address(c_rarg3, BasicObjectLock::obj_offset_in_bytes()), (int32_t) NULL_WORD); 3656 // if not used then remember entry in c_rarg1 3657 __ cmov(Assembler::equal, c_rarg1, c_rarg3); 3658 // check if current entry is for same object 3659 __ cmpptr(rax, Address(c_rarg3, BasicObjectLock::obj_offset_in_bytes())); 3660 // if same object then stop searching 3661 __ jccb(Assembler::equal, exit); 3662 // otherwise advance to next entry 3663 __ addptr(c_rarg3, entry_size); 3664 __ bind(entry); 3665 // check if bottom reached 3666 __ cmpptr(c_rarg3, c_rarg2); 3667 // if not at bottom then check this entry 3668 __ jcc(Assembler::notEqual, loop); 3669 __ bind(exit); 3670 } 3671 3672 __ testptr(c_rarg1, c_rarg1); // check if a slot has been found 3673 __ jcc(Assembler::notZero, allocated); // if found, continue with that one 3674 3675 // allocate one if there's no free slot 3676 { 3677 Label entry, loop; 3678 // 1. 
//-----------------------------------------------------------------------------
// Exceptions

void TemplateTable::athrow() {
  transition(atos, vtos);
  __ null_check(rax);
  __ jump(ExternalAddress(Interpreter::throw_exception_entry()));
}

//-----------------------------------------------------------------------------
// Synchronization
//
// Note: monitorenter & exit are symmetric routines, which is reflected
//       in the assembly code structure as well
//
// Stack layout:
//
// [expressions  ] <--- rsp               = expression stack top
// ..
// [expressions  ]
// [monitor entry] <--- monitor block top = expression stack bot
// ..
// [monitor entry]
// [frame data   ] <--- monitor block bot
// ...
// [saved rbp    ] <--- rbp
void TemplateTable::monitorenter() {
  transition(atos, vtos);

  // check for NULL object
  __ null_check(rax);

  const Address monitor_block_top(
        rbp, frame::interpreter_frame_monitor_block_top_offset * wordSize);
  const Address monitor_block_bot(
        rbp, frame::interpreter_frame_initial_sp_offset * wordSize);
  const int entry_size = frame::interpreter_frame_monitor_size() * wordSize;

  Label allocated;

  // initialize entry pointer
  __ xorl(c_rarg1, c_rarg1); // points to free slot or NULL

  // find a free slot in the monitor block (result in c_rarg1)
  {
    Label entry, loop, exit;
    __ movptr(c_rarg3, monitor_block_top); // points to current entry,
                                           // starting with top-most entry
    __ lea(c_rarg2, monitor_block_bot);    // points to word before bottom
                                           // of monitor block
    __ jmpb(entry);

    __ bind(loop);
    // check if current entry is used
    __ cmpptr(Address(c_rarg3, BasicObjectLock::obj_offset_in_bytes()), (int32_t) NULL_WORD);
    // if not used then remember entry in c_rarg1
    __ cmov(Assembler::equal, c_rarg1, c_rarg3);
    // check if current entry is for same object
    __ cmpptr(rax, Address(c_rarg3, BasicObjectLock::obj_offset_in_bytes()));
    // if same object then stop searching
    __ jccb(Assembler::equal, exit);
    // otherwise advance to next entry
    __ addptr(c_rarg3, entry_size);
    __ bind(entry);
    // check if bottom reached
    __ cmpptr(c_rarg3, c_rarg2);
    // if not at bottom then check this entry
    __ jcc(Assembler::notEqual, loop);
    __ bind(exit);
  }

  __ testptr(c_rarg1, c_rarg1);          // check if a slot has been found
  __ jcc(Assembler::notZero, allocated); // if found, continue with that one

  // allocate one if there's no free slot
  {
    Label entry, loop;
    // 1. compute new pointers             // rsp: old expression stack top
    __ movptr(c_rarg1, monitor_block_bot); // c_rarg1: old expression stack bottom
    __ subptr(rsp, entry_size);            // move expression stack top
    __ subptr(c_rarg1, entry_size);        // move expression stack bottom
    __ mov(c_rarg3, rsp);                  // set start value for copy loop
    __ movptr(monitor_block_bot, c_rarg1); // set new monitor block bottom
    __ jmp(entry);
    // 2. move expression stack contents
    __ bind(loop);
    __ movptr(c_rarg2, Address(c_rarg3, entry_size)); // load expression stack
                                                      // word from old location
    __ movptr(Address(c_rarg3, 0), c_rarg2);          // and store it at new location
    __ addptr(c_rarg3, wordSize);                     // advance to next word
    __ bind(entry);
    __ cmpptr(c_rarg3, c_rarg1);       // check if bottom reached
    __ jcc(Assembler::notEqual, loop); // if not at bottom then
                                       // copy next word
  }

  // call run-time routine
  // c_rarg1: points to monitor entry
  __ bind(allocated);

  // Increment bcp to point to the next bytecode, so that exception
  // handling for asynchronous exceptions works correctly.
  // The object has already been popped from the stack, so the
  // expression stack looks correct.
  __ increment(r13);

  // store object
  __ movptr(Address(c_rarg1, BasicObjectLock::obj_offset_in_bytes()), rax);
  __ lock_object(c_rarg1);

  // check to make sure this monitor doesn't cause stack overflow after locking
  __ save_bcp(); // in case of exception
  __ generate_stack_overflow_check(0);

  // The bcp has already been incremented. Just need to dispatch to
  // next instruction.
  __ dispatch_next(vtos);
}
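// For reference, a minimal C++ sketch of the monitor-slot search generated in
// monitorenter() above. MonitorSlotSketch is an illustrative placeholder for
// the real BasicObjectLock layout, not HotSpot API. A NULL result means no
// reusable slot was found, and the caller must carve out a new entry by
// sliding the expression stack down by one entry, as the allocation block
// above does.
struct MonitorSlotSketch {
  void* obj; // NULL marks an unused entry
};

static inline MonitorSlotSketch* find_free_monitor_slot_sketch(MonitorSlotSketch* top,
                                                               MonitorSlotSketch* bot,
                                                               const void* obj) {
  MonitorSlotSketch* free_slot = NULL;
  for (MonitorSlotSketch* cur = top; cur != bot; cur++) {
    if (cur->obj == NULL) free_slot = cur; // remember an unused entry
    if (cur->obj == obj)  break;           // stop at an entry for the same object
  }
  return free_slot; // NULL => caller allocates a fresh entry
}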
void TemplateTable::monitorexit() {
  transition(atos, vtos);

  // check for NULL object
  __ null_check(rax);

  const Address monitor_block_top(
        rbp, frame::interpreter_frame_monitor_block_top_offset * wordSize);
  const Address monitor_block_bot(
        rbp, frame::interpreter_frame_initial_sp_offset * wordSize);
  const int entry_size = frame::interpreter_frame_monitor_size() * wordSize;

  Label found;

  // find matching slot
  {
    Label entry, loop;
    __ movptr(c_rarg1, monitor_block_top); // points to current entry,
                                           // starting with top-most entry
    __ lea(c_rarg2, monitor_block_bot);    // points to word before bottom
                                           // of monitor block
    __ jmpb(entry);

    __ bind(loop);
    // check if current entry is for same object
    __ cmpptr(rax, Address(c_rarg1, BasicObjectLock::obj_offset_in_bytes()));
    // if same object then stop searching
    __ jcc(Assembler::equal, found);
    // otherwise advance to next entry
    __ addptr(c_rarg1, entry_size);
    __ bind(entry);
    // check if bottom reached
    __ cmpptr(c_rarg1, c_rarg2);
    // if not at bottom then check this entry
    __ jcc(Assembler::notEqual, loop);
  }

  // error handling: unlocking was not block-structured
  __ call_VM(noreg, CAST_FROM_FN_PTR(address,
                    InterpreterRuntime::throw_illegal_monitor_state_exception));
  __ should_not_reach_here();

  // call run-time routine
  // c_rarg1: points to monitor entry
  __ bind(found);
  __ push_ptr(rax); // make sure object is on stack (contract with oopMaps)
  __ unlock_object(c_rarg1);
  __ pop_ptr(rax); // discard object
}


// Wide instructions
void TemplateTable::wide() {
  transition(vtos, vtos);
  __ load_unsigned_byte(rbx, at_bcp(1));
  __ lea(rscratch1, ExternalAddress((address)Interpreter::_wentry_point));
  __ jmp(Address(rscratch1, rbx, Address::times_8));
  // Note: the r13 increment step is part of the individual wide
  // bytecode implementations
}


// Multi arrays
void TemplateTable::multianewarray() {
  transition(vtos, atos);
  __ load_unsigned_byte(rax, at_bcp(3)); // get number of dimensions
  // last dim is on top of stack; we want address of first one:
  // first_addr = last_addr + (ndims - 1) * wordSize
  __ lea(c_rarg1, Address(rsp, rax, Address::times_8, -wordSize));
  call_VM(rax,
          CAST_FROM_FN_PTR(address, InterpreterRuntime::multianewarray),
          c_rarg1);
  __ load_unsigned_byte(rbx, at_bcp(3));
  __ lea(rsp, Address(rsp, rbx, Address::times_8));
}
#endif // !CC_INTERP
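// For reference, a minimal C++ sketch of the first-dimension address
// computation in multianewarray() above (illustrative only; it assumes the
// interpreter's downward-growing expression stack, where the last dimension
// sits at the lowest address and each dimension occupies one machine word).
static inline char* first_dim_addr_sketch(char* last_addr, int ndims, int word_size) {
  // e.g. ndims == 3, word_size == 8: the first dimension lives 16 bytes above
  // the last one, matching lea(c_rarg1, Address(rsp, rax, times_8, -wordSize))
  return last_addr + (ndims - 1) * word_size;
}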