/*
 * Copyright (c) 2016, 2017, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2016, 2017 SAP SE. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/interpreterRuntime.hpp"
#include "interpreter/interp_masm.hpp"
#include "interpreter/templateTable.hpp"
#include "memory/universe.inline.hpp"
#include "oops/objArrayKlass.hpp"
#include "oops/oop.inline.hpp"
#include "prims/methodHandles.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/synchronizer.hpp"

#ifdef PRODUCT
#define __ _masm->
#define BLOCK_COMMENT(str)
#define BIND(label)        __ bind(label);
#else
#define __ (PRODUCT_ONLY(false&&)Verbose ? (_masm->block_comment(FILE_AND_LINE),_masm):_masm)->
#define BLOCK_COMMENT(str) __ block_comment(str)
#define BIND(label)        __ bind(label); BLOCK_COMMENT(#label ":")
#endif

// The assumed minimum size of a BranchTableBlock.
// The actual size of each block heavily depends on the CPU capabilities and,
// of course, on the logic implemented in each block.
#ifdef ASSERT
  #define BTB_MINSIZE 256
#else
  #define BTB_MINSIZE  64
#endif

#ifdef ASSERT
// Macro to open a BranchTableBlock (a piece of code that is branched to by a calculated branch).
#define BTB_BEGIN(lbl, alignment, name)                                \
  __ align_address(alignment);                                         \
  __ bind(lbl);                                                        \
  { unsigned int b_off = __ offset();                                  \
    uintptr_t b_addr = (uintptr_t)__ pc();                             \
    __ z_larl(Z_R0, (int64_t)0); /* Check current address alignment. */\
    __ z_slgr(Z_R0, br_tab);     /* Current Address must be equal   */ \
    __ z_slgr(Z_R0, flags);      /* to calculated branch target.    */ \
    __ z_brc(Assembler::bcondLogZero, 3); /* skip trap if ok. */       \
    __ z_illtrap(0x55);                                                \
    guarantee(b_addr%alignment == 0, "bad alignment at begin of block" name);
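
// Illustrative usage sketch (not from this file; label and name are
// hypothetical): a calculated branch dispatches into a table of
// equally-sized blocks, each bracketed by these macros:
//
//   BTB_BEGIN(L_case, BTB_MINSIZE, "case");
//   // ... at most BTB_MINSIZE bytes of code ...
//   BTB_END(L_case, BTB_MINSIZE, "case");
//
// BTB_BEGIN aligns and binds the label (the ASSERT variant additionally
// verifies at run time that the computed branch target equals the current
// address); BTB_END checks that the block did not outgrow its table slot.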
// Macro to close a BranchTableBlock (a piece of code that is branched to by a calculated branch).
#define BTB_END(lbl, alignment, name)                                  \
    uintptr_t e_addr = (uintptr_t)__ pc();                             \
    unsigned int e_off = __ offset();                                  \
    unsigned int len = e_off-b_off;                                    \
    if (len > alignment) {                                             \
      tty->print_cr("%4d of %4d @ " INTPTR_FORMAT ": Block len for %s",\
                    len, alignment, e_addr-len, name);                 \
      guarantee(len <= alignment, "block too large");                  \
    }                                                                  \
    guarantee(len == e_addr-b_addr, "block len mismatch");             \
  }
#else
// Macro to open a BranchTableBlock (a piece of code that is branched to by a calculated branch).
#define BTB_BEGIN(lbl, alignment, name)                                \
  __ align_address(alignment);                                         \
  __ bind(lbl);                                                        \
  { unsigned int b_off = __ offset();                                  \
    uintptr_t b_addr = (uintptr_t)__ pc();                             \
    guarantee(b_addr%alignment == 0, "bad alignment at begin of block" name);

// Macro to close a BranchTableBlock (a piece of code that is branched to by a calculated branch).
#define BTB_END(lbl, alignment, name)                                  \
    uintptr_t e_addr = (uintptr_t)__ pc();                             \
    unsigned int e_off = __ offset();                                  \
    unsigned int len = e_off-b_off;                                    \
    if (len > alignment) {                                             \
      tty->print_cr("%4d of %4d @ " INTPTR_FORMAT ": Block len for %s",\
                    len, alignment, e_addr-len, name);                 \
      guarantee(len <= alignment, "block too large");                  \
    }                                                                  \
    guarantee(len == e_addr-b_addr, "block len mismatch");             \
  }
#endif // ASSERT

// Platform-dependent initialization.

void TemplateTable::pd_initialize() {
  // No specific initialization.
}

// Address computation: local variables

static inline Address iaddress(int n) {
  return Address(Z_locals, Interpreter::local_offset_in_bytes(n));
}

static inline Address laddress(int n) {
  return iaddress(n + 1);
}

static inline Address faddress(int n) {
  return iaddress(n);
}

static inline Address daddress(int n) {
  return laddress(n);
}

static inline Address aaddress(int n) {
  return iaddress(n);
}

// Pass NULL if no shift instruction should be emitted.
static inline Address iaddress(InterpreterMacroAssembler *masm, Register r) {
  if (masm) {
    masm->z_sllg(r, r, LogBytesPerWord);  // index2bytes
  }
  return Address(Z_locals, r, Interpreter::local_offset_in_bytes(0));
}

// Pass NULL if no shift instruction should be emitted.
static inline Address laddress(InterpreterMacroAssembler *masm, Register r) {
  if (masm) {
    masm->z_sllg(r, r, LogBytesPerWord);  // index2bytes
  }
  return Address(Z_locals, r, Interpreter::local_offset_in_bytes(1));
}

static inline Address faddress(InterpreterMacroAssembler *masm, Register r) {
  return iaddress(masm, r);
}

static inline Address daddress(InterpreterMacroAssembler *masm, Register r) {
  return laddress(masm, r);
}

static inline Address aaddress(InterpreterMacroAssembler *masm, Register r) {
  return iaddress(masm, r);
}
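
// The int-argument helpers above serve local slots known at code-generation
// time; the register variants serve runtime indices and emit the scaling
// shift as a side effect. A minimal sketch, mirroring the load templates
// further below:
//
//   __ z_ly(Z_tos, iaddress(n));                                  // iload_<n>
//   locals_index(Z_R1_scratch);                                   // index from bytecode stream
//   __ mem2reg_opt(Z_tos, iaddress(_masm, Z_R1_scratch), false);  // iload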
// At top of Java expression stack which may be different from esp(). It
// isn't different for category 1 objects.
static inline Address at_tos(int slot = 0) {
  return Address(Z_esp, Interpreter::expr_offset_in_bytes(slot));
}

// Condition conversion
static Assembler::branch_condition j_not(TemplateTable::Condition cc) {
  switch (cc) {
    case TemplateTable::equal :
      return Assembler::bcondNotEqual;
    case TemplateTable::not_equal :
      return Assembler::bcondEqual;
    case TemplateTable::less :
      return Assembler::bcondNotLow;
    case TemplateTable::less_equal :
      return Assembler::bcondHigh;
    case TemplateTable::greater :
      return Assembler::bcondNotHigh;
    case TemplateTable::greater_equal:
      return Assembler::bcondLow;
  }
  ShouldNotReachHere();
  return Assembler::bcondZero;
}
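
// Worked example: j_not(TemplateTable::equal) == bcondNotEqual. The
// conditional branch templates below branch on the inverted condition to a
// not_taken label, so the presumably more frequent taken path continues on
// the fall-through into branch().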
// Do an oop store like *(base + offset) = val
// offset can be a register or a constant.
static void do_oop_store(InterpreterMacroAssembler* _masm,
                         Register base,
                         RegisterOrConstant offset,
                         Register val,
                         bool val_is_null, // == false does not guarantee that val really is not equal to NULL.
                         Register tmp1,    // If tmp3 is volatile, either tmp1 or tmp2 must be
                         Register tmp2,    // non-volatile to hold a copy of pre_val across runtime calls.
                         Register tmp3,    // Ideally, this tmp register is non-volatile, as it is used to
                                           // hold pre_val (must survive runtime calls).
                         BarrierSet::Name barrier,
                         bool precise) {
  BLOCK_COMMENT("do_oop_store {");
  assert(val != noreg, "val must always be valid, even if it is zero");
  assert_different_registers(tmp1, tmp2, tmp3, val, base, offset.register_or_noreg());
  __ verify_oop(val);
  switch (barrier) {
#if INCLUDE_ALL_GCS
    case BarrierSet::G1SATBCTLogging:
      {
#ifdef ASSERT
        if (val_is_null) { // Check if the flag setting reflects reality.
          Label OK;
          __ z_ltgr(val, val);
          __ z_bre(OK);
          __ z_illtrap(0x11);
          __ bind(OK);
        }
#endif
        Register pre_val = tmp3;
        // Load and record the previous value.
        __ g1_write_barrier_pre(base, offset, pre_val, val,
                                tmp1, tmp2,
                                false);  // Needs to hold pre_val in non_volatile register?

        if (val_is_null) {
          __ store_heap_oop_null(val, offset, base);
        } else {
          Label Done;
          // val_is_null == false does not guarantee that val really is not equal to NULL.
          // Checking for this case dynamically has some cost, but also some benefit (in GC).
          // It's hard to say if cost or benefit is greater.
          { Label OK;
            __ z_ltgr(val, val);
            __ z_brne(OK);
            __ store_heap_oop_null(val, offset, base);
            __ z_bru(Done);
            __ bind(OK);
          }
          // G1 barrier needs uncompressed oop for region cross check.
          // Store_heap_oop compresses the oop in the argument register.
          Register val_work = val;
          if (UseCompressedOops) {
            val_work = tmp3;
            __ z_lgr(val_work, val);
          }
          __ store_heap_oop_not_null(val_work, offset, base);

          // We need precise card marks for oop array stores.
          // Otherwise, cardmarking the object which contains the oop is sufficient.
          if (precise && !(offset.is_constant() && offset.as_constant() == 0)) {
            __ add2reg_with_index(base,
                                  offset.constant_or_zero(),
                                  offset.register_or_noreg(),
                                  base);
          }
          __ g1_write_barrier_post(base /* store_adr */, val, tmp1, tmp2, tmp3);
          __ bind(Done);
        }
      }
      break;
#endif // INCLUDE_ALL_GCS
    case BarrierSet::CardTableForRS:
    case BarrierSet::CardTableExtension:
      {
        if (val_is_null) {
          __ store_heap_oop_null(val, offset, base);
        } else {
          __ store_heap_oop(val, offset, base);
          // Flatten object address if needed.
          if (precise && ((offset.register_or_noreg() != noreg) || (offset.constant_or_zero() != 0))) {
            __ load_address(base, Address(base, offset.register_or_noreg(), offset.constant_or_zero()));
          }
          __ card_write_barrier_post(base, tmp1);
        }
      }
      break;
    case BarrierSet::ModRef:
      // fall through
    default:
      ShouldNotReachHere();
  }
  BLOCK_COMMENT("} do_oop_store");
}
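
// Worked example for the 'precise' flag: for an array store (aastore) the
// card covering the element address base+offset must be dirtied, so base is
// first advanced to the exact store location. For a field store (putfield)
// it suffices to dirty the card covering the object header, so offset can
// be ignored and base left untouched.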
Address TemplateTable::at_bcp(int offset) {
  assert(_desc->uses_bcp(), "inconsistent uses_bcp information");
  return Address(Z_bcp, offset);
}

void TemplateTable::patch_bytecode(Bytecodes::Code bc,
                                   Register bc_reg,
                                   Register temp_reg,
                                   bool load_bc_into_bc_reg, // = true
                                   int byte_no) {
  if (!RewriteBytecodes) { return; }

  NearLabel L_patch_done;
  BLOCK_COMMENT("patch_bytecode {");

  switch (bc) {
    case Bytecodes::_fast_aputfield:
    case Bytecodes::_fast_bputfield:
    case Bytecodes::_fast_zputfield:
    case Bytecodes::_fast_cputfield:
    case Bytecodes::_fast_dputfield:
    case Bytecodes::_fast_fputfield:
    case Bytecodes::_fast_iputfield:
    case Bytecodes::_fast_lputfield:
    case Bytecodes::_fast_sputfield:
      {
        // We skip bytecode quickening for putfield instructions when
        // the put_code written to the constant pool cache is zero.
        // This is required so that every execution of this instruction
        // calls out to InterpreterRuntime::resolve_get_put to do
        // additional, required work.
        assert(byte_no == f1_byte || byte_no == f2_byte, "byte_no out of range");
        assert(load_bc_into_bc_reg, "we use bc_reg as temp");
        __ get_cache_and_index_and_bytecode_at_bcp(Z_R1_scratch, bc_reg,
                                                   temp_reg, byte_no, 1);
        __ load_const_optimized(bc_reg, bc);
        __ compareU32_and_branch(temp_reg, (intptr_t)0,
                                 Assembler::bcondZero, L_patch_done);
      }
      break;
    default:
      assert(byte_no == -1, "sanity");
      // The pair bytecodes have already done the load.
      if (load_bc_into_bc_reg) {
        __ load_const_optimized(bc_reg, bc);
      }
      break;
  }

  if (JvmtiExport::can_post_breakpoint()) {
    Label L_fast_patch;

    // If a breakpoint is present we can't rewrite the stream directly.
    __ z_cli(at_bcp(0), Bytecodes::_breakpoint);
    __ z_brne(L_fast_patch);
    __ get_method(temp_reg);
    // Let breakpoint table handling rewrite to quicker bytecode.
    __ call_VM_static(noreg,
                      CAST_FROM_FN_PTR(address, InterpreterRuntime::set_original_bytecode_at),
                      temp_reg, Z_R13, bc_reg);
    __ z_bru(L_patch_done);

    __ bind(L_fast_patch);
  }

#ifdef ASSERT
  NearLabel L_okay;

  // We load into 64 bits, since this works on any CPU.
  __ z_llgc(temp_reg, at_bcp(0));
  __ compareU32_and_branch(temp_reg, Bytecodes::java_code(bc),
                           Assembler::bcondEqual, L_okay);
  __ compareU32_and_branch(temp_reg, bc_reg, Assembler::bcondEqual, L_okay);
  __ stop_static("patching the wrong bytecode");
  __ bind(L_okay);
#endif

  // Patch bytecode.
  __ z_stc(bc_reg, at_bcp(0));

  __ bind(L_patch_done);
  BLOCK_COMMENT("} patch_bytecode");
}
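
// Quickening sketch (constant pool index #42 is illustrative): after the
// first execution has resolved the field, the opcode byte in the bytecode
// stream is overwritten in place,
//
//   getfield #42   ->   fast_igetfield #42
//
// so later executions dispatch straight to the fast template. With JVMTI
// breakpoints active the stream holds _breakpoint and the original bytecode
// lives in the breakpoint table, hence the call to set_original_bytecode_at
// above instead of a direct store.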
// Individual instructions

void TemplateTable::nop() {
  transition(vtos, vtos);
}

void TemplateTable::shouldnotreachhere() {
  transition(vtos, vtos);
  __ stop("shouldnotreachhere bytecode");
}

void TemplateTable::aconst_null() {
  transition(vtos, atos);
  __ clear_reg(Z_tos, true, false);
}

void TemplateTable::iconst(int value) {
  transition(vtos, itos);
  // Zero extension of the iconst makes zero extension at runtime obsolete.
  __ load_const_optimized(Z_tos, ((unsigned long)(unsigned int)value));
}

void TemplateTable::lconst(int value) {
  transition(vtos, ltos);
  __ load_const_optimized(Z_tos, value);
}

// No pc-relative load/store for floats.
void TemplateTable::fconst(int value) {
  transition(vtos, ftos);
  static float one = 1.0f, two = 2.0f;

  switch (value) {
    case 0:
      __ z_lzer(Z_ftos);
      return;
    case 1:
      __ load_absolute_address(Z_R1_scratch, (address) &one);
      __ mem2freg_opt(Z_ftos, Address(Z_R1_scratch), false);
      return;
    case 2:
      __ load_absolute_address(Z_R1_scratch, (address) &two);
      __ mem2freg_opt(Z_ftos, Address(Z_R1_scratch), false);
      return;
    default:
      ShouldNotReachHere();
      return;
  }
}

void TemplateTable::dconst(int value) {
  transition(vtos, dtos);
  static double one = 1.0;

  switch (value) {
    case 0:
      __ z_lzdr(Z_ftos);
      return;
    case 1:
      __ load_absolute_address(Z_R1_scratch, (address) &one);
      __ mem2freg_opt(Z_ftos, Address(Z_R1_scratch));
      return;
    default:
      ShouldNotReachHere();
      return;
  }
}

void TemplateTable::bipush() {
  transition(vtos, itos);
  __ z_lb(Z_tos, at_bcp(1));
}

void TemplateTable::sipush() {
  transition(vtos, itos);
  __ get_2_byte_integer_at_bcp(Z_tos, 1, InterpreterMacroAssembler::Signed);
}

void TemplateTable::ldc(bool wide) {
  transition(vtos, vtos);
  Label call_ldc, notFloat, notClass, Done;
  const Register RcpIndex = Z_tmp_1;
  const Register Rtags    = Z_ARG2;

  if (wide) {
    __ get_2_byte_integer_at_bcp(RcpIndex, 1, InterpreterMacroAssembler::Unsigned);
  } else {
    __ z_llgc(RcpIndex, at_bcp(1));
  }

  __ get_cpool_and_tags(Z_tmp_2, Rtags);

  const int base_offset = ConstantPool::header_size() * wordSize;
  const int tags_offset = Array<u1>::base_offset_in_bytes();
  const Register Raddr_type = Rtags;

  // Get address of type.
  __ add2reg_with_index(Raddr_type, tags_offset, RcpIndex, Rtags);

  __ z_cli(0, Raddr_type, JVM_CONSTANT_UnresolvedClass);
  __ z_bre(call_ldc);    // Unresolved class - get the resolved class.

  __ z_cli(0, Raddr_type, JVM_CONSTANT_UnresolvedClassInError);
  __ z_bre(call_ldc);    // Unresolved class in error state - call into runtime
                         // to throw the error from the first resolution attempt.

  __ z_cli(0, Raddr_type, JVM_CONSTANT_Class);
  __ z_brne(notClass);   // Resolved class - need to call vm to get java
                         // mirror of the class.

  // We deal with a class. Call the VM to do the appropriate work.
  __ bind(call_ldc);
  __ load_const_optimized(Z_ARG2, wide);
  call_VM(Z_RET, CAST_FROM_FN_PTR(address, InterpreterRuntime::ldc), Z_ARG2);
  __ push_ptr(Z_RET);
  __ z_bru(Done);

  // Not a class.
  __ bind(notClass);
  Register RcpOffset = RcpIndex;
  __ z_sllg(RcpOffset, RcpIndex, LogBytesPerWord); // Convert index to offset.
  __ z_cli(0, Raddr_type, JVM_CONSTANT_Float);
  __ z_brne(notFloat);

  // ftos
  __ mem2freg_opt(Z_ftos, Address(Z_tmp_2, RcpOffset, base_offset), false);
  __ push_f();
  __ z_bru(Done);

  __ bind(notFloat);
#ifdef ASSERT
  {
    Label L;

    __ z_cli(0, Raddr_type, JVM_CONSTANT_Integer);
    __ z_bre(L);
    // String and Object are rewritten to fast_aldc.
    __ stop("unexpected tag type in ldc");

    __ bind(L);
  }
#endif

  // itos
  __ mem2reg_opt(Z_tos, Address(Z_tmp_2, RcpOffset, base_offset), false);
  __ push_i(Z_tos);

  __ bind(Done);
}

// Fast path for caching oop constants.
// %%% We should use this to handle Class and String constants also.
// %%% It will simplify the ldc/primitive path considerably.
void TemplateTable::fast_aldc(bool wide) {
  transition(vtos, atos);

  const Register index = Z_tmp_2;
  int index_size = wide ? sizeof(u2) : sizeof(u1);
  Label L_resolved;

  // We are resolved if the resolved reference cache entry contains a
  // non-null object (CallSite, etc.).
  __ get_cache_index_at_bcp(index, 1, index_size);  // Load index.
  __ load_resolved_reference_at_index(Z_tos, index);
  __ z_ltgr(Z_tos, Z_tos);
  __ z_brne(L_resolved);

  // First time invocation - must resolve first.
  address entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_ldc);
  __ load_const_optimized(Z_ARG1, (int)bytecode());
  __ call_VM(Z_tos, entry, Z_ARG1);

  __ bind(L_resolved);
  __ verify_oop(Z_tos);
}
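
// Resolution flow of fast_aldc above, sketched: the first execution finds
// NULL in the resolved-references cache and calls
// InterpreterRuntime::resolve_ldc, which stores the resulting oop (String,
// MethodHandle, ...) into the cache. Every later execution takes only the
// cache load plus the null test before L_resolved.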
void TemplateTable::ldc2_w() {
  transition(vtos, vtos);
  Label Long, Done;

  // Z_tmp_1 = index of cp entry
  __ get_2_byte_integer_at_bcp(Z_tmp_1, 1, InterpreterMacroAssembler::Unsigned);

  __ get_cpool_and_tags(Z_tmp_2, Z_tos);

  const int base_offset = ConstantPool::header_size() * wordSize;
  const int tags_offset = Array<u1>::base_offset_in_bytes();

  // Get address of type.
  __ add2reg_with_index(Z_tos, tags_offset, Z_tos, Z_tmp_1);

  // Index needed in both branches, so calculate here.
  __ z_sllg(Z_tmp_1, Z_tmp_1, LogBytesPerWord);  // index2bytes

  // Check type.
  __ z_cli(0, Z_tos, JVM_CONSTANT_Double);
  __ z_brne(Long);

  // dtos
  __ mem2freg_opt(Z_ftos, Address(Z_tmp_2, Z_tmp_1, base_offset));
  __ push_d();
  __ z_bru(Done);

  __ bind(Long);
  // ltos
  __ mem2reg_opt(Z_tos, Address(Z_tmp_2, Z_tmp_1, base_offset));
  __ push_l();

  __ bind(Done);
}

void TemplateTable::locals_index(Register reg, int offset) {
  __ z_llgc(reg, at_bcp(offset));
  __ z_lcgr(reg);
}

void TemplateTable::iload() {
  iload_internal();
}

void TemplateTable::nofast_iload() {
  iload_internal(may_not_rewrite);
}

void TemplateTable::iload_internal(RewriteControl rc) {
  transition(vtos, itos);

  if (RewriteFrequentPairs && rc == may_rewrite) {
    NearLabel rewrite, done;
    const Register bc = Z_ARG4;

    assert(Z_R1_scratch != bc, "register damaged");

    // Get next byte.
    __ z_llgc(Z_R1_scratch, at_bcp(Bytecodes::length_for(Bytecodes::_iload)));

    // If _iload, wait to rewrite to iload2. We only want to rewrite the
    // last two iloads in a pair. Comparing against fast_iload means that
    // the next bytecode is neither an iload nor a caload, and therefore
    // an iload pair.
    __ compareU32_and_branch(Z_R1_scratch, Bytecodes::_iload,
                             Assembler::bcondEqual, done);

    __ load_const_optimized(bc, Bytecodes::_fast_iload2);
    __ compareU32_and_branch(Z_R1_scratch, Bytecodes::_fast_iload,
                             Assembler::bcondEqual, rewrite);

    // If _caload, rewrite to fast_icaload.
    __ load_const_optimized(bc, Bytecodes::_fast_icaload);
    __ compareU32_and_branch(Z_R1_scratch, Bytecodes::_caload,
                             Assembler::bcondEqual, rewrite);

    // Rewrite so iload doesn't check again.
    __ load_const_optimized(bc, Bytecodes::_fast_iload);

    // rewrite
    // bc: fast bytecode
    __ bind(rewrite);
    patch_bytecode(Bytecodes::_iload, bc, Z_R1_scratch, false);

    __ bind(done);
  }

  // Get the local value into tos.
  locals_index(Z_R1_scratch);
  __ mem2reg_opt(Z_tos, iaddress(_masm, Z_R1_scratch), false);
}

void TemplateTable::fast_iload2() {
  transition(vtos, itos);

  locals_index(Z_R1_scratch);
  __ mem2reg_opt(Z_tos, iaddress(_masm, Z_R1_scratch), false);
  __ push_i(Z_tos);
  locals_index(Z_R1_scratch, 3);
  __ mem2reg_opt(Z_tos, iaddress(_masm, Z_R1_scratch), false);
}

void TemplateTable::fast_iload() {
  transition(vtos, itos);

  locals_index(Z_R1_scratch);
  __ mem2reg_opt(Z_tos, iaddress(_masm, Z_R1_scratch), false);
}
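
// Rewriting example for the iload templates above (local indices 2 and 3
// are illustrative). For the pair "iload 2; iload 3", the second iload is
// rewritten to fast_iload first; when the first iload then sees fast_iload
// as its successor, it becomes fast_iload2, which performs both loads. An
// "iload 2; caload" pair becomes fast_icaload, and a lone iload becomes
// fast_iload, which skips this successor inspection on later executions.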
void TemplateTable::lload() {
  transition(vtos, ltos);

  locals_index(Z_R1_scratch);
  __ mem2reg_opt(Z_tos, laddress(_masm, Z_R1_scratch));
}

void TemplateTable::fload() {
  transition(vtos, ftos);

  locals_index(Z_R1_scratch);
  __ mem2freg_opt(Z_ftos, faddress(_masm, Z_R1_scratch), false);
}

void TemplateTable::dload() {
  transition(vtos, dtos);

  locals_index(Z_R1_scratch);
  __ mem2freg_opt(Z_ftos, daddress(_masm, Z_R1_scratch));
}

void TemplateTable::aload() {
  transition(vtos, atos);

  locals_index(Z_R1_scratch);
  __ mem2reg_opt(Z_tos, aaddress(_masm, Z_R1_scratch));
}

void TemplateTable::locals_index_wide(Register reg) {
  __ get_2_byte_integer_at_bcp(reg, 2, InterpreterMacroAssembler::Unsigned);
  __ z_lcgr(reg);
}

void TemplateTable::wide_iload() {
  transition(vtos, itos);

  locals_index_wide(Z_tmp_1);
  __ mem2reg_opt(Z_tos, iaddress(_masm, Z_tmp_1), false);
}

void TemplateTable::wide_lload() {
  transition(vtos, ltos);

  locals_index_wide(Z_tmp_1);
  __ mem2reg_opt(Z_tos, laddress(_masm, Z_tmp_1));
}

void TemplateTable::wide_fload() {
  transition(vtos, ftos);

  locals_index_wide(Z_tmp_1);
  __ mem2freg_opt(Z_ftos, faddress(_masm, Z_tmp_1), false);
}

void TemplateTable::wide_dload() {
  transition(vtos, dtos);

  locals_index_wide(Z_tmp_1);
  __ mem2freg_opt(Z_ftos, daddress(_masm, Z_tmp_1));
}

void TemplateTable::wide_aload() {
  transition(vtos, atos);

  locals_index_wide(Z_tmp_1);
  __ mem2reg_opt(Z_tos, aaddress(_masm, Z_tmp_1));
}

void TemplateTable::index_check(Register array, Register index, unsigned int shift) {
  assert_different_registers(Z_R1_scratch, array, index);

  // Check array.
  __ null_check(array, Z_R0_scratch, arrayOopDesc::length_offset_in_bytes());

  // Sign extend index for use by indexed load.
  __ z_lgfr(index, index);

  // Check index.
  Label index_ok;
  __ z_cl(index, Address(array, arrayOopDesc::length_offset_in_bytes()));
  __ z_brl(index_ok);
  __ lgr_if_needed(Z_ARG3, index); // See generate_ArrayIndexOutOfBounds_handler().
  // Give back the array to create more detailed exceptions.
  __ lgr_if_needed(Z_ARG2, array); // See generate_ArrayIndexOutOfBounds_handler().
  __ load_absolute_address(Z_R1_scratch,
                           Interpreter::_throw_ArrayIndexOutOfBoundsException_entry);
  __ z_bcr(Assembler::bcondAlways, Z_R1_scratch);
  __ bind(index_ok);

  if (shift > 0) {
    __ z_sllg(index, index, shift);
  }
}
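
// index_check in one picture, e.g. for iaload (shift == LogBytesPerInt):
// the index is sign-extended, then compared against the array length with
// an unsigned compare (z_cl), so a negative index appears as a huge
// unsigned value and takes the exception path as well; on success the
// index is scaled in place to a byte offset (index <<= shift).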
void TemplateTable::iaload() {
  transition(itos, itos);

  __ pop_ptr(Z_tmp_1);  // array
  // Index is in Z_tos.
  Register index = Z_tos;
  index_check(Z_tmp_1, index, LogBytesPerInt); // Kills Z_ARG3.
  // Load the value.
  __ mem2reg_opt(Z_tos,
                 Address(Z_tmp_1, index, arrayOopDesc::base_offset_in_bytes(T_INT)),
                 false);
}

void TemplateTable::laload() {
  transition(itos, ltos);

  __ pop_ptr(Z_tmp_2);
  // Z_tos   : index
  // Z_tmp_2 : array
  Register index = Z_tos;
  index_check(Z_tmp_2, index, LogBytesPerLong);
  __ mem2reg_opt(Z_tos,
                 Address(Z_tmp_2, index, arrayOopDesc::base_offset_in_bytes(T_LONG)));
}

void TemplateTable::faload() {
  transition(itos, ftos);

  __ pop_ptr(Z_tmp_2);
  // Z_tos   : index
  // Z_tmp_2 : array
  Register index = Z_tos;
  index_check(Z_tmp_2, index, LogBytesPerInt);
  __ mem2freg_opt(Z_ftos,
                  Address(Z_tmp_2, index, arrayOopDesc::base_offset_in_bytes(T_FLOAT)),
                  false);
}

void TemplateTable::daload() {
  transition(itos, dtos);

  __ pop_ptr(Z_tmp_2);
  // Z_tos   : index
  // Z_tmp_2 : array
  Register index = Z_tos;
  index_check(Z_tmp_2, index, LogBytesPerLong);
  __ mem2freg_opt(Z_ftos,
                  Address(Z_tmp_2, index, arrayOopDesc::base_offset_in_bytes(T_DOUBLE)));
}

void TemplateTable::aaload() {
  transition(itos, atos);

  unsigned const int shift = LogBytesPerHeapOop;
  __ pop_ptr(Z_tmp_1);  // array
  // Index is in Z_tos.
  Register index = Z_tos;
  index_check(Z_tmp_1, index, shift);
  // Now load array element.
  __ load_heap_oop(Z_tos,
                   Address(Z_tmp_1, index, arrayOopDesc::base_offset_in_bytes(T_OBJECT)));
  __ verify_oop(Z_tos);
}

void TemplateTable::baload() {
  transition(itos, itos);

  __ pop_ptr(Z_tmp_1);
  // Z_tos   : index
  // Z_tmp_1 : array
  Register index = Z_tos;
  index_check(Z_tmp_1, index, 0);
  __ z_lb(Z_tos,
          Address(Z_tmp_1, index, arrayOopDesc::base_offset_in_bytes(T_BYTE)));
}

void TemplateTable::caload() {
  transition(itos, itos);

  __ pop_ptr(Z_tmp_2);
  // Z_tos   : index
  // Z_tmp_2 : array
  Register index = Z_tos;
  index_check(Z_tmp_2, index, LogBytesPerShort);
  // Load into 64 bits, works on all CPUs.
  __ z_llgh(Z_tos,
            Address(Z_tmp_2, index, arrayOopDesc::base_offset_in_bytes(T_CHAR)));
}

// Iload followed by caload frequent pair.
void TemplateTable::fast_icaload() {
  transition(vtos, itos);

  // Load index out of locals.
  locals_index(Z_R1_scratch);
  __ mem2reg_opt(Z_ARG3, iaddress(_masm, Z_R1_scratch), false);
  // Z_ARG3  : index
  // Z_tmp_2 : array
  __ pop_ptr(Z_tmp_2);
  index_check(Z_tmp_2, Z_ARG3, LogBytesPerShort);
  // Load into 64 bits, works on all CPUs.
  __ z_llgh(Z_tos,
            Address(Z_tmp_2, Z_ARG3, arrayOopDesc::base_offset_in_bytes(T_CHAR)));
}

void TemplateTable::saload() {
  transition(itos, itos);

  __ pop_ptr(Z_tmp_2);
  // Z_tos   : index
  // Z_tmp_2 : array
  Register index = Z_tos;
  index_check(Z_tmp_2, index, LogBytesPerShort);
  __ z_lh(Z_tos,
          Address(Z_tmp_2, index, arrayOopDesc::base_offset_in_bytes(T_SHORT)));
}
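
// Zero vs. sign extension, by example: for the raw element bits 0xFFFF,
// caload (z_llgh, zero-extending) yields the Java char 65535, while saload
// (z_lh, sign-extending) yields the Java short -1.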
void TemplateTable::iload(int n) {
  transition(vtos, itos);
  __ z_ly(Z_tos, iaddress(n));
}

void TemplateTable::lload(int n) {
  transition(vtos, ltos);
  __ z_lg(Z_tos, laddress(n));
}

void TemplateTable::fload(int n) {
  transition(vtos, ftos);
  __ mem2freg_opt(Z_ftos, faddress(n), false);
}

void TemplateTable::dload(int n) {
  transition(vtos, dtos);
  __ mem2freg_opt(Z_ftos, daddress(n));
}

void TemplateTable::aload(int n) {
  transition(vtos, atos);
  __ mem2reg_opt(Z_tos, aaddress(n));
}

void TemplateTable::aload_0() {
  aload_0_internal();
}

void TemplateTable::nofast_aload_0() {
  aload_0_internal(may_not_rewrite);
}

void TemplateTable::aload_0_internal(RewriteControl rc) {
  transition(vtos, atos);

  // According to bytecode histograms, the pairs:
  //
  // _aload_0, _fast_igetfield
  // _aload_0, _fast_agetfield
  // _aload_0, _fast_fgetfield
  //
  // occur frequently. If RewriteFrequentPairs is set, the (slow)
  // _aload_0 bytecode checks if the next bytecode is either
  // _fast_igetfield, _fast_agetfield or _fast_fgetfield and then
  // rewrites the current bytecode into a pair bytecode; otherwise it
  // rewrites the current bytecode into _fast_aload_0 that doesn't do
  // the pair check anymore.
  //
  // Note: If the next bytecode is _getfield, the rewrite must be
  // delayed, otherwise we may miss an opportunity for a pair.
  //
  // Also rewrite frequent pairs
  //   aload_0, aload_1
  //   aload_0, iload_1
  // These bytecodes with a small amount of code are most profitable
  // to rewrite.
  if (!(RewriteFrequentPairs && (rc == may_rewrite))) {
    aload(0);
    return;
  }

  NearLabel rewrite, done;
  const Register bc = Z_ARG4;

  assert(Z_R1_scratch != bc, "register damaged");
  // Get next byte.
  __ z_llgc(Z_R1_scratch, at_bcp(Bytecodes::length_for(Bytecodes::_aload_0)));

  // Do actual aload_0.
  aload(0);

  // If _getfield then wait with rewrite.
  __ compareU32_and_branch(Z_R1_scratch, Bytecodes::_getfield,
                           Assembler::bcondEqual, done);

  // If _igetfield then rewrite to _fast_iaccess_0.
  assert(Bytecodes::java_code(Bytecodes::_fast_iaccess_0)
         == Bytecodes::_aload_0, "fix bytecode definition");

  __ load_const_optimized(bc, Bytecodes::_fast_iaccess_0);
  __ compareU32_and_branch(Z_R1_scratch, Bytecodes::_fast_igetfield,
                           Assembler::bcondEqual, rewrite);

  // If _agetfield then rewrite to _fast_aaccess_0.
  assert(Bytecodes::java_code(Bytecodes::_fast_aaccess_0)
         == Bytecodes::_aload_0, "fix bytecode definition");

  __ load_const_optimized(bc, Bytecodes::_fast_aaccess_0);
  __ compareU32_and_branch(Z_R1_scratch, Bytecodes::_fast_agetfield,
                           Assembler::bcondEqual, rewrite);

  // If _fgetfield then rewrite to _fast_faccess_0.
  assert(Bytecodes::java_code(Bytecodes::_fast_faccess_0)
         == Bytecodes::_aload_0, "fix bytecode definition");

  __ load_const_optimized(bc, Bytecodes::_fast_faccess_0);
  __ compareU32_and_branch(Z_R1_scratch, Bytecodes::_fast_fgetfield,
                           Assembler::bcondEqual, rewrite);

  // Else rewrite to _fast_aload_0.
  assert(Bytecodes::java_code(Bytecodes::_fast_aload_0)
         == Bytecodes::_aload_0, "fix bytecode definition");
  __ load_const_optimized(bc, Bytecodes::_fast_aload_0);

  // rewrite
  // bc: fast bytecode
  __ bind(rewrite);

  patch_bytecode(Bytecodes::_aload_0, bc, Z_R1_scratch, false);
  // Reload local 0 because the VM call inside patch_bytecode()
  // may trigger GC and thus change the oop.
  aload(0);

  __ bind(done);
}
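
// Rewriting example for aload_0_internal above (constant pool index #7 is
// illustrative):
//
//   aload_0; getfield #7         // no rewrite yet - a pair chance is pending
//   aload_0; fast_igetfield #7   ->   fast_iaccess_0 #7
//   aload_0; iconst_1            ->   fast_aload_0; iconst_1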
void TemplateTable::istore() {
  transition(itos, vtos);
  locals_index(Z_R1_scratch);
  __ reg2mem_opt(Z_tos, iaddress(_masm, Z_R1_scratch), false);
}

void TemplateTable::lstore() {
  transition(ltos, vtos);
  locals_index(Z_R1_scratch);
  __ reg2mem_opt(Z_tos, laddress(_masm, Z_R1_scratch));
}

void TemplateTable::fstore() {
  transition(ftos, vtos);
  locals_index(Z_R1_scratch);
  __ freg2mem_opt(Z_ftos, faddress(_masm, Z_R1_scratch));
}

void TemplateTable::dstore() {
  transition(dtos, vtos);
  locals_index(Z_R1_scratch);
  __ freg2mem_opt(Z_ftos, daddress(_masm, Z_R1_scratch));
}

void TemplateTable::astore() {
  transition(vtos, vtos);
  __ pop_ptr(Z_tos);
  locals_index(Z_R1_scratch);
  __ reg2mem_opt(Z_tos, aaddress(_masm, Z_R1_scratch));
}

void TemplateTable::wide_istore() {
  transition(vtos, vtos);
  __ pop_i(Z_tos);
  locals_index_wide(Z_tmp_1);
  __ reg2mem_opt(Z_tos, iaddress(_masm, Z_tmp_1), false);
}

void TemplateTable::wide_lstore() {
  transition(vtos, vtos);
  __ pop_l(Z_tos);
  locals_index_wide(Z_tmp_1);
  __ reg2mem_opt(Z_tos, laddress(_masm, Z_tmp_1));
}

void TemplateTable::wide_fstore() {
  transition(vtos, vtos);
  __ pop_f(Z_ftos);
  locals_index_wide(Z_tmp_1);
  __ freg2mem_opt(Z_ftos, faddress(_masm, Z_tmp_1), false);
}

void TemplateTable::wide_dstore() {
  transition(vtos, vtos);
  __ pop_d(Z_ftos);
  locals_index_wide(Z_tmp_1);
  __ freg2mem_opt(Z_ftos, daddress(_masm, Z_tmp_1));
}

void TemplateTable::wide_astore() {
  transition(vtos, vtos);
  __ pop_ptr(Z_tos);
  locals_index_wide(Z_tmp_1);
  __ reg2mem_opt(Z_tos, aaddress(_masm, Z_tmp_1));
}

void TemplateTable::iastore() {
  transition(itos, vtos);

  Register index = Z_ARG3; // index_check expects index in Z_ARG3.
  // Value is in Z_tos ...
  __ pop_i(index);        // index
  __ pop_ptr(Z_tmp_1);    // array
  index_check(Z_tmp_1, index, LogBytesPerInt);
  // ... and then move the value.
  __ reg2mem_opt(Z_tos,
                 Address(Z_tmp_1, index, arrayOopDesc::base_offset_in_bytes(T_INT)),
                 false);
}

void TemplateTable::lastore() {
  transition(ltos, vtos);

  __ pop_i(Z_ARG3);
  __ pop_ptr(Z_tmp_2);
  // Z_tos   : value
  // Z_ARG3  : index
  // Z_tmp_2 : array
  index_check(Z_tmp_2, Z_ARG3, LogBytesPerLong); // Prefer index in Z_ARG3.
  __ reg2mem_opt(Z_tos,
                 Address(Z_tmp_2, Z_ARG3, arrayOopDesc::base_offset_in_bytes(T_LONG)));
}

void TemplateTable::fastore() {
  transition(ftos, vtos);

  __ pop_i(Z_ARG3);
  __ pop_ptr(Z_tmp_2);
  // Z_ftos  : value
  // Z_ARG3  : index
  // Z_tmp_2 : array
  index_check(Z_tmp_2, Z_ARG3, LogBytesPerInt); // Prefer index in Z_ARG3.
  __ freg2mem_opt(Z_ftos,
                  Address(Z_tmp_2, Z_ARG3, arrayOopDesc::base_offset_in_bytes(T_FLOAT)),
                  false);
}

void TemplateTable::dastore() {
  transition(dtos, vtos);

  __ pop_i(Z_ARG3);
  __ pop_ptr(Z_tmp_2);
  // Z_ftos  : value
  // Z_ARG3  : index
  // Z_tmp_2 : array
  index_check(Z_tmp_2, Z_ARG3, LogBytesPerLong); // Prefer index in Z_ARG3.
  __ freg2mem_opt(Z_ftos,
                  Address(Z_tmp_2, Z_ARG3, arrayOopDesc::base_offset_in_bytes(T_DOUBLE)));
}

void TemplateTable::aastore() {
  NearLabel is_null, ok_is_subtype, done;
  transition(vtos, vtos);

  // stack: ..., array, index, value
  Register Rvalue = Z_tos;
  Register Rarray = Z_ARG2;
  Register Rindex = Z_ARG3; // Convention for index_check().

  __ load_ptr(0, Rvalue);
  __ z_l(Rindex, Address(Z_esp, Interpreter::expr_offset_in_bytes(1)));
  __ load_ptr(2, Rarray);

  unsigned const int shift = LogBytesPerHeapOop;
  index_check(Rarray, Rindex, shift); // side effect: Rindex = Rindex << shift
  Register Rstore_addr = Rindex;
  // Address where the store goes to, i.e. &(Rarray[index]).
  __ load_address(Rstore_addr, Address(Rarray, Rindex, arrayOopDesc::base_offset_in_bytes(T_OBJECT)));

  // Do array store check - check for NULL value first.
  __ compareU64_and_branch(Rvalue, (intptr_t)0, Assembler::bcondEqual, is_null);

  Register Rsub_klass   = Z_ARG4;
  Register Rsuper_klass = Z_ARG5;
  __ load_klass(Rsub_klass, Rvalue);
  // Load superklass.
  __ load_klass(Rsuper_klass, Rarray);
  __ z_lg(Rsuper_klass, Address(Rsuper_klass, ObjArrayKlass::element_klass_offset()));

  // Generate a fast subtype check. Branch to ok_is_subtype if no failure.
  // Throw if failure.
  Register tmp1 = Z_tmp_1;
  Register tmp2 = Z_tmp_2;
  __ gen_subtype_check(Rsub_klass, Rsuper_klass, tmp1, tmp2, ok_is_subtype);

  // Fall through on failure.
  // Object is in Rvalue == Z_tos.
  assert(Rvalue == Z_tos, "that's the expected location");
  __ load_absolute_address(tmp1, Interpreter::_throw_ArrayStoreException_entry);
  __ z_br(tmp1);

  // Come here on success.
  __ bind(ok_is_subtype);

  // Now store using the appropriate barrier.
  Register tmp3 = Rsub_klass;
  do_oop_store(_masm, Rstore_addr, (intptr_t)0/*offset*/, Rvalue, false/*val==null*/,
               tmp3, tmp2, tmp1, _bs->kind(), true);
  __ z_bru(done);

  // Have a NULL in Rvalue.
  __ bind(is_null);
  __ profile_null_seen(tmp1);

  // Store a NULL.
  do_oop_store(_masm, Rstore_addr, (intptr_t)0/*offset*/, Rvalue, true/*val==null*/,
               tmp3, tmp2, tmp1, _bs->kind(), true);

  // Pop stack arguments.
  __ bind(done);
  __ add2reg(Z_esp, 3 * Interpreter::stackElementSize);
}
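
// aastore store-check summary for the code above: a NULL value is stored
// without a type check (only profile_null_seen); a non-NULL value must
// satisfy value->klass() <: element_klass(array->klass()), verified by
// gen_subtype_check, else control falls through to the ArrayStoreException
// entry. Both stores go through do_oop_store with precise == true, since
// an array element (not an object header) is written.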
void TemplateTable::bastore() {
  transition(itos, vtos);

  __ pop_i(Z_ARG3);
  __ pop_ptr(Z_tmp_2);
  // Z_tos   : value
  // Z_ARG3  : index
  // Z_tmp_2 : array

  // Need to check whether array is boolean or byte
  // since both types share the bastore bytecode.
  __ load_klass(Z_tmp_1, Z_tmp_2);
  __ z_llgf(Z_tmp_1, Address(Z_tmp_1, Klass::layout_helper_offset()));
  __ z_tmll(Z_tmp_1, Klass::layout_helper_boolean_diffbit());
  Label L_skip;
  __ z_bfalse(L_skip);
  // If it is a T_BOOLEAN array, mask the stored value to 0/1.
  __ z_nilf(Z_tos, 0x1);
  __ bind(L_skip);

  // No index shift necessary - pass 0.
  index_check(Z_tmp_2, Z_ARG3, 0); // Prefer index in Z_ARG3.
  __ z_stc(Z_tos,
           Address(Z_tmp_2, Z_ARG3, arrayOopDesc::base_offset_in_bytes(T_BYTE)));
}

void TemplateTable::castore() {
  transition(itos, vtos);

  __ pop_i(Z_ARG3);
  __ pop_ptr(Z_tmp_2);
  // Z_tos   : value
  // Z_ARG3  : index
  // Z_tmp_2 : array
  Register index = Z_ARG3; // prefer index in Z_ARG3
  index_check(Z_tmp_2, index, LogBytesPerShort);
  __ z_sth(Z_tos,
           Address(Z_tmp_2, index, arrayOopDesc::base_offset_in_bytes(T_CHAR)));
}

void TemplateTable::sastore() {
  castore();
}

void TemplateTable::istore(int n) {
  transition(itos, vtos);
  __ reg2mem_opt(Z_tos, iaddress(n), false);
}

void TemplateTable::lstore(int n) {
  transition(ltos, vtos);
  __ reg2mem_opt(Z_tos, laddress(n));
}

void TemplateTable::fstore(int n) {
  transition(ftos, vtos);
  __ freg2mem_opt(Z_ftos, faddress(n), false);
}

void TemplateTable::dstore(int n) {
  transition(dtos, vtos);
  __ freg2mem_opt(Z_ftos, daddress(n));
}

void TemplateTable::astore(int n) {
  transition(vtos, vtos);
  __ pop_ptr(Z_tos);
  __ reg2mem_opt(Z_tos, aaddress(n));
}

void TemplateTable::pop() {
  transition(vtos, vtos);
  __ add2reg(Z_esp, Interpreter::stackElementSize);
}

void TemplateTable::pop2() {
  transition(vtos, vtos);
  __ add2reg(Z_esp, 2 * Interpreter::stackElementSize);
}

void TemplateTable::dup() {
  transition(vtos, vtos);
  __ load_ptr(0, Z_tos);
  __ push_ptr(Z_tos);
  // stack: ..., a, a
}

void TemplateTable::dup_x1() {
  transition(vtos, vtos);

  // stack: ..., a, b
  __ load_ptr(0, Z_tos);         // load b
  __ load_ptr(1, Z_R0_scratch);  // load a
  __ store_ptr(1, Z_tos);        // store b
  __ store_ptr(0, Z_R0_scratch); // store a
  __ push_ptr(Z_tos);            // push b
  // stack: ..., b, a, b
}

void TemplateTable::dup_x2() {
  transition(vtos, vtos);

  // stack: ..., a, b, c
  __ load_ptr(0, Z_R0_scratch);  // load c
  __ load_ptr(2, Z_R1_scratch);  // load a
  __ store_ptr(2, Z_R0_scratch); // store c in a
  __ push_ptr(Z_R0_scratch);     // push c
  // stack: ..., c, b, c, c
  __ load_ptr(2, Z_R0_scratch);  // load b
  __ store_ptr(2, Z_R1_scratch); // store a in b
  // stack: ..., c, a, c, c
  __ store_ptr(1, Z_R0_scratch); // store b in c
  // stack: ..., c, a, b, c
}
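
// Note on the stack-manipulation templates of this section: they move raw,
// untyped stack slots via load_ptr/store_ptr. The same code therefore
// serves both shapes the JVM spec allows, e.g. dup2 duplicates either two
// category-1 values (two ints) or one category-2 value (a long occupying
// two slots).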
void TemplateTable::dup2() {
  transition(vtos, vtos);

  // stack: ..., a, b
  __ load_ptr(1, Z_R0_scratch);  // load a
  __ push_ptr(Z_R0_scratch);     // push a
  __ load_ptr(1, Z_R0_scratch);  // load b
  __ push_ptr(Z_R0_scratch);     // push b
  // stack: ..., a, b, a, b
}

void TemplateTable::dup2_x1() {
  transition(vtos, vtos);

  // stack: ..., a, b, c
  __ load_ptr(0, Z_R0_scratch);  // load c
  __ load_ptr(1, Z_R1_scratch);  // load b
  __ push_ptr(Z_R1_scratch);     // push b
  __ push_ptr(Z_R0_scratch);     // push c
  // stack: ..., a, b, c, b, c
  __ store_ptr(3, Z_R0_scratch); // store c in b
  // stack: ..., a, c, c, b, c
  __ load_ptr(4, Z_R0_scratch);  // load a
  __ store_ptr(2, Z_R0_scratch); // store a in 2nd c
  // stack: ..., a, c, a, b, c
  __ store_ptr(4, Z_R1_scratch); // store b in a
  // stack: ..., b, c, a, b, c
}

void TemplateTable::dup2_x2() {
  transition(vtos, vtos);

  // stack: ..., a, b, c, d
  __ load_ptr(0, Z_R0_scratch);  // load d
  __ load_ptr(1, Z_R1_scratch);  // load c
  __ push_ptr(Z_R1_scratch);     // push c
  __ push_ptr(Z_R0_scratch);     // push d
  // stack: ..., a, b, c, d, c, d
  __ load_ptr(4, Z_R1_scratch);  // load b
  __ store_ptr(2, Z_R1_scratch); // store b in d
  __ store_ptr(4, Z_R0_scratch); // store d in b
  // stack: ..., a, d, c, b, c, d
  __ load_ptr(5, Z_R0_scratch);  // load a
  __ load_ptr(3, Z_R1_scratch);  // load c
  __ store_ptr(3, Z_R0_scratch); // store a in c
  __ store_ptr(5, Z_R1_scratch); // store c in a
  // stack: ..., c, d, a, b, c, d
}

void TemplateTable::swap() {
  transition(vtos, vtos);

  // stack: ..., a, b
  __ load_ptr(1, Z_R0_scratch);  // load a
  __ load_ptr(0, Z_R1_scratch);  // load b
  __ store_ptr(0, Z_R0_scratch); // store a in b
  __ store_ptr(1, Z_R1_scratch); // store b in a
  // stack: ..., b, a
}

void TemplateTable::iop2(Operation op) {
  transition(itos, itos);
  switch (op) {
    case add  : __ z_ay(Z_tos,  __ stackTop()); __ pop_i(); break;
    case sub  : __ z_sy(Z_tos,  __ stackTop()); __ pop_i(); __ z_lcr(Z_tos, Z_tos); break;
    case mul  : __ z_msy(Z_tos, __ stackTop()); __ pop_i(); break;
    case _and : __ z_ny(Z_tos,  __ stackTop()); __ pop_i(); break;
    case _or  : __ z_oy(Z_tos,  __ stackTop()); __ pop_i(); break;
    case _xor : __ z_xy(Z_tos,  __ stackTop()); __ pop_i(); break;
    case shl  : __ z_lr(Z_tmp_1, Z_tos);
                __ z_nill(Z_tmp_1, 31);  // Lowest 5 bits are the shift amount.
                __ pop_i(Z_tos); __ z_sll(Z_tos, 0, Z_tmp_1); break;
    case shr  : __ z_lr(Z_tmp_1, Z_tos);
                __ z_nill(Z_tmp_1, 31);  // Lowest 5 bits are the shift amount.
                __ pop_i(Z_tos); __ z_sra(Z_tos, 0, Z_tmp_1); break;
    case ushr : __ z_lr(Z_tmp_1, Z_tos);
                __ z_nill(Z_tmp_1, 31);  // Lowest 5 bits are the shift amount.
                __ pop_i(Z_tos); __ z_srl(Z_tos, 0, Z_tmp_1); break;
    default   : ShouldNotReachHere(); break;
  }
  return;
}
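
// Operand-order example for the sub case above: computing 7 - 5, the stack
// holds value1 == 7 below value2 == 5 (TOS). z_sy yields 5 - 7 == -2 in
// Z_tos, and the trailing z_lcr negates that to the required +2, i.e.
// value1 - value2 without reloading the operands in the other order.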
void TemplateTable::lop2(Operation op) {
  transition(ltos, ltos);

  switch (op) {
    case add  : __ z_ag(Z_tos,  __ stackTop()); __ pop_l(); break;
    case sub  : __ z_sg(Z_tos,  __ stackTop()); __ pop_l(); __ z_lcgr(Z_tos, Z_tos); break;
    case mul  : __ z_msg(Z_tos, __ stackTop()); __ pop_l(); break;
    case _and : __ z_ng(Z_tos,  __ stackTop()); __ pop_l(); break;
    case _or  : __ z_og(Z_tos,  __ stackTop()); __ pop_l(); break;
    case _xor : __ z_xg(Z_tos,  __ stackTop()); __ pop_l(); break;
    default   : ShouldNotReachHere(); break;
  }
  return;
}

// Common part of idiv/irem.
static void idiv_helper(InterpreterMacroAssembler * _masm, address exception) {
  NearLabel not_null;

  // Use register pair Z_tmp_1, Z_tmp_2 for DIVIDE SINGLE.
  assert(Z_tmp_1->successor() == Z_tmp_2, "need even/odd register pair for idiv/irem");

  // Get dividend.
  __ pop_i(Z_tmp_2);

  // If divisor == 0 throw exception.
  __ compare32_and_branch(Z_tos, (intptr_t)0, Assembler::bcondNotEqual, not_null);
  __ load_absolute_address(Z_R1_scratch, exception);
  __ z_br(Z_R1_scratch);

  __ bind(not_null);

  __ z_lgfr(Z_tmp_2, Z_tmp_2); // Sign extend dividend.
  __ z_dsgfr(Z_tmp_1, Z_tos);  // Do it.
}

void TemplateTable::idiv() {
  transition(itos, itos);

  idiv_helper(_masm, Interpreter::_throw_ArithmeticException_entry);
  __ z_llgfr(Z_tos, Z_tmp_2);  // Result is in Z_tmp_2.
}

void TemplateTable::irem() {
  transition(itos, itos);

  idiv_helper(_masm, Interpreter::_throw_ArithmeticException_entry);
  __ z_llgfr(Z_tos, Z_tmp_1);  // Result is in Z_tmp_1.
}

void TemplateTable::lmul() {
  transition(ltos, ltos);

  // Multiply with memory operand.
  __ z_msg(Z_tos, __ stackTop());
  __ pop_l();  // Pop operand.
}

// Common part of ldiv/lrem.
//
// Input:
//   Z_tos := the divisor (dividend still on stack)
//
// Updated registers:
//   Z_tmp_1 := pop_l() % Z_tos ; if is_ldiv == false
//   Z_tmp_2 := pop_l() / Z_tos ; if is_ldiv == true
//
static void ldiv_helper(InterpreterMacroAssembler * _masm, address exception, bool is_ldiv) {
  NearLabel not_null, done;

  // Use register pair Z_tmp_1, Z_tmp_2 for DIVIDE SINGLE.
  assert(Z_tmp_1->successor() == Z_tmp_2,
         "need even/odd register pair for idiv/irem");

  // Get dividend.
  __ pop_l(Z_tmp_2);

  // If divisor == 0 throw exception.
  __ compare64_and_branch(Z_tos, (intptr_t)0, Assembler::bcondNotEqual, not_null);
  __ load_absolute_address(Z_R1_scratch, exception);
  __ z_br(Z_R1_scratch);

  __ bind(not_null);
  // Special case for dividend == min_jlong (0x8000000000000000) and divisor == -1.
  if (is_ldiv) {
    // result := Z_tmp_2 := - dividend
    __ z_lcgr(Z_tmp_2, Z_tmp_2);
  } else {
    // result remainder := Z_tmp_1 := 0
    __ clear_reg(Z_tmp_1, true, false);  // Don't set CC.
  }

  // If divisor == -1 goto done.
  __ compare64_and_branch(Z_tos, -1, Assembler::bcondEqual, done);
  if (is_ldiv) {
    // Restore sign, because divisor != -1.
    __ z_lcgr(Z_tmp_2, Z_tmp_2);
  }
  __ z_dsgr(Z_tmp_1, Z_tos);   // Do it.
  __ bind(done);
}
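
// Why the special case above exists, by example: Java requires
// min_jlong / -1 == min_jlong (the negation wraps), but the hardware
// divide would recognize a fixed-point divide exception on that overflow.
// Hence the negated dividend is prepared up front and the division is
// skipped entirely for divisor == -1 (x / -1 == -x and x % -1 == 0 for
// all x).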
void TemplateTable::ldiv() {
  transition(ltos, ltos);

  ldiv_helper(_masm, Interpreter::_throw_ArithmeticException_entry, true /*is_ldiv*/);
  __ z_lgr(Z_tos, Z_tmp_2);  // Result is in Z_tmp_2.
}

void TemplateTable::lrem() {
  transition(ltos, ltos);

  ldiv_helper(_masm, Interpreter::_throw_ArithmeticException_entry, false /*is_ldiv*/);
  __ z_lgr(Z_tos, Z_tmp_1);  // Result is in Z_tmp_1.
}

void TemplateTable::lshl() {
  transition(itos, ltos);

  // Z_tos: shift amount
  __ pop_l(Z_tmp_1);  // Get the value to shift.
  __ z_sllg(Z_tos, Z_tmp_1, 0, Z_tos);
}

void TemplateTable::lshr() {
  transition(itos, ltos);

  // Z_tos: shift amount
  __ pop_l(Z_tmp_1);  // Get the value to shift.
  __ z_srag(Z_tos, Z_tmp_1, 0, Z_tos);
}

void TemplateTable::lushr() {
  transition(itos, ltos);

  // Z_tos: shift amount
  __ pop_l(Z_tmp_1);  // Get the value to shift.
  __ z_srlg(Z_tos, Z_tmp_1, 0, Z_tos);
}

void TemplateTable::fop2(Operation op) {
  transition(ftos, ftos);

  switch (op) {
    case add:
      // Add memory operand.
      __ z_aeb(Z_ftos, __ stackTop()); __ pop_f(); return;
    case sub:
      // Subtract with reversed operands: TOS is the subtrahend.
      __ z_ler(Z_F1, Z_ftos);    // second operand (subtrahend)
      __ pop_f(Z_ftos);          // first operand from stack
      __ z_sebr(Z_ftos, Z_F1);
      return;
    case mul:
      // Multiply with memory operand.
      __ z_meeb(Z_ftos, __ stackTop()); __ pop_f(); return;
    case div:
      // Divide with reversed operands: TOS is the divisor.
      __ z_ler(Z_F1, Z_ftos);    // divisor
      __ pop_f(Z_ftos);          // dividend from stack
      __ z_debr(Z_ftos, Z_F1);
      return;
    case rem:
      // Do runtime call.
      __ z_ler(Z_FARG2, Z_ftos); // divisor
      __ pop_f(Z_FARG1);         // dividend
      __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::frem));
      // Result should be in the right place (Z_ftos == Z_FRET).
      return;
    default:
      ShouldNotReachHere();
      return;
  }
}

void TemplateTable::dop2(Operation op) {
  transition(dtos, dtos);

  switch (op) {
    case add:
      // Add memory operand.
      __ z_adb(Z_ftos, __ stackTop()); __ pop_d(); return;
    case sub:
      // Subtract with reversed operands: TOS is the subtrahend.
      __ z_ldr(Z_F1, Z_ftos);    // second operand (subtrahend)
      __ pop_d(Z_ftos);          // first operand from stack
      __ z_sdbr(Z_ftos, Z_F1);
      return;
    case mul:
      // Multiply with memory operand.
      __ z_mdb(Z_ftos, __ stackTop()); __ pop_d(); return;
    case div:
      // Divide with reversed operands: TOS is the divisor.
      __ z_ldr(Z_F1, Z_ftos);    // divisor
      __ pop_d(Z_ftos);          // dividend from stack
      __ z_ddbr(Z_ftos, Z_F1);
      return;
    case rem:
      // Do runtime call.
      __ z_ldr(Z_FARG2, Z_ftos); // divisor
      __ pop_d(Z_FARG1);         // dividend
      __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::drem));
      // Result should be in the right place (Z_ftos == Z_FRET).
      return;
    default:
      ShouldNotReachHere();
      return;
  }
}
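
// Operand-order example for the sub/div cases above: computing 7.0f - 5.0f,
// TOS holds 5.0f, which is parked in Z_F1; the pop then brings 7.0f into
// Z_ftos, and z_sebr computes 7.0f - 5.0f directly. Unlike iop2/lop2, no
// negate trick is needed because the register form takes both operands.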
void TemplateTable::ineg() {
  transition(itos, itos);
  __ z_lcr(Z_tos);
}

void TemplateTable::lneg() {
  transition(ltos, ltos);
  __ z_lcgr(Z_tos);
}

void TemplateTable::fneg() {
  transition(ftos, ftos);
  __ z_lcebr(Z_ftos, Z_ftos);
}

void TemplateTable::dneg() {
  transition(dtos, dtos);
  __ z_lcdbr(Z_ftos, Z_ftos);
}

void TemplateTable::iinc() {
  transition(vtos, vtos);

  Address local;
  __ z_lb(Z_R0_scratch, at_bcp(2)); // Get constant.
  locals_index(Z_R1_scratch);
  local = iaddress(_masm, Z_R1_scratch);
  __ z_a(Z_R0_scratch, local);
  __ reg2mem_opt(Z_R0_scratch, local, false);
}

void TemplateTable::wide_iinc() {
  transition(vtos, vtos);

  // Z_tmp_1 := increment
  __ get_2_byte_integer_at_bcp(Z_tmp_1, 4, InterpreterMacroAssembler::Signed);
  // Z_tmp_2 := index of local to increment
  locals_index_wide(Z_tmp_2);
  // Load, increment, and store.
  __ access_local_int(Z_tmp_2, Z_tos);
  __ z_agr(Z_tos, Z_tmp_1);
  // Shifted index is still in Z_tmp_2.
  __ reg2mem_opt(Z_tos, Address(Z_locals, Z_tmp_2), false);
}
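
// Bytecode layout behind the offsets used in wide_iinc above:
//
//   wide(0xc4)  iinc(0x84)  index_hi  index_lo  const_hi  const_lo
//     bcp+0       bcp+1       bcp+2     bcp+3     bcp+4     bcp+5
//
// hence locals_index_wide fetches the index at bcp+2 while the signed
// increment is read at bcp+4.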
void TemplateTable::convert() {
  // Checking
#ifdef ASSERT
  TosState tos_in  = ilgl;
  TosState tos_out = ilgl;

  switch (bytecode()) {
    case Bytecodes::_i2l:
    case Bytecodes::_i2f:
    case Bytecodes::_i2d:
    case Bytecodes::_i2b:
    case Bytecodes::_i2c:
    case Bytecodes::_i2s:
      tos_in = itos;
      break;
    case Bytecodes::_l2i:
    case Bytecodes::_l2f:
    case Bytecodes::_l2d:
      tos_in = ltos;
      break;
    case Bytecodes::_f2i:
    case Bytecodes::_f2l:
    case Bytecodes::_f2d:
      tos_in = ftos;
      break;
    case Bytecodes::_d2i:
    case Bytecodes::_d2l:
    case Bytecodes::_d2f:
      tos_in = dtos;
      break;
    default:
      ShouldNotReachHere();
  }
  switch (bytecode()) {
    case Bytecodes::_l2i:
    case Bytecodes::_f2i:
    case Bytecodes::_d2i:
    case Bytecodes::_i2b:
    case Bytecodes::_i2c:
    case Bytecodes::_i2s:
      tos_out = itos;
      break;
    case Bytecodes::_i2l:
    case Bytecodes::_f2l:
    case Bytecodes::_d2l:
      tos_out = ltos;
      break;
    case Bytecodes::_i2f:
    case Bytecodes::_l2f:
    case Bytecodes::_d2f:
      tos_out = ftos;
      break;
    case Bytecodes::_i2d:
    case Bytecodes::_l2d:
    case Bytecodes::_f2d:
      tos_out = dtos;
      break;
    default:
      ShouldNotReachHere();
  }

  transition(tos_in, tos_out);
#endif // ASSERT

  // Conversion
  Label done;
  switch (bytecode()) {
    case Bytecodes::_i2l:
      __ z_lgfr(Z_tos, Z_tos);
      return;
    case Bytecodes::_i2f:
      __ z_cefbr(Z_ftos, Z_tos);
      return;
    case Bytecodes::_i2d:
      __ z_cdfbr(Z_ftos, Z_tos);
      return;
    case Bytecodes::_i2b:
      // Sign extend least significant byte.
      __ move_reg_if_needed(Z_tos, T_BYTE, Z_tos, T_INT);
      return;
    case Bytecodes::_i2c:
      // Zero extend 2 least significant bytes.
      __ move_reg_if_needed(Z_tos, T_CHAR, Z_tos, T_INT);
      return;
    case Bytecodes::_i2s:
      // Sign extend 2 least significant bytes.
      __ move_reg_if_needed(Z_tos, T_SHORT, Z_tos, T_INT);
      return;
    case Bytecodes::_l2i:
      // Sign-extend not needed here, upper 4 bytes of int value in register are ignored.
      return;
    case Bytecodes::_l2f:
      __ z_cegbr(Z_ftos, Z_tos);
      return;
    case Bytecodes::_l2d:
      __ z_cdgbr(Z_ftos, Z_tos);
      return;
    case Bytecodes::_f2i:
    case Bytecodes::_f2l:
      __ clear_reg(Z_tos, true, false);  // Don't set CC.
      __ z_cebr(Z_ftos, Z_ftos);
      __ z_brno(done);                   // NaN -> 0
      if (bytecode() == Bytecodes::_f2i) {
        __ z_cfebr(Z_tos, Z_ftos, Assembler::to_zero);
      } else { // bytecode() == Bytecodes::_f2l
        __ z_cgebr(Z_tos, Z_ftos, Assembler::to_zero);
      }
      break;
    case Bytecodes::_f2d:
      __ move_freg_if_needed(Z_ftos, T_DOUBLE, Z_ftos, T_FLOAT);
      return;
    case Bytecodes::_d2i:
    case Bytecodes::_d2l:
      __ clear_reg(Z_tos, true, false);  // Don't set CC.
      __ z_cdbr(Z_ftos, Z_ftos);
      __ z_brno(done);                   // NaN -> 0
      if (bytecode() == Bytecodes::_d2i) {
        __ z_cfdbr(Z_tos, Z_ftos, Assembler::to_zero);
      } else { // Bytecodes::_d2l
        __ z_cgdbr(Z_tos, Z_ftos, Assembler::to_zero);
      }
      break;
    case Bytecodes::_d2f:
      __ move_freg_if_needed(Z_ftos, T_FLOAT, Z_ftos, T_DOUBLE);
      return;
    default:
      ShouldNotReachHere();
  }
  __ bind(done);
}

void TemplateTable::lcmp() {
  transition(ltos, itos);

  Label done;
  Register val1 = Z_R0_scratch;
  Register val2 = Z_R1_scratch;

  if (VM_Version::has_LoadStoreConditional()) {
    __ pop_l(val1);         // pop value 1
    __ z_lghi(val2, -1);    // lt value
    __ z_cgr(val1, Z_tos);  // Compare with Z_tos (value 2). Protect CC under all circumstances.
    __ z_lghi(val1, 1);     // gt value
    __ z_lghi(Z_tos, 0);    // eq value

    __ z_locgr(Z_tos, val1, Assembler::bcondHigh);
    __ z_locgr(Z_tos, val2, Assembler::bcondLow);
  } else {
    __ pop_l(val1);         // Pop value 1.
    __ z_cgr(val1, Z_tos);  // Compare with Z_tos (value 2). Protect CC under all circumstances.

    __ z_lghi(Z_tos, 0);    // eq value
    __ z_bre(done);

    __ z_lghi(Z_tos, 1);    // gt value
    __ z_brh(done);

    __ z_lghi(Z_tos, -1);   // lt value
  }

  __ bind(done);
}

void TemplateTable::float_cmp(bool is_float, int unordered_result) {
  Label done;

  if (is_float) {
    __ pop_f(Z_FARG2);
    __ z_cebr(Z_FARG2, Z_ftos);
  } else {
    __ pop_d(Z_FARG2);
    __ z_cdbr(Z_FARG2, Z_ftos);
  }

  if (VM_Version::has_LoadStoreConditional()) {
    Register one       = Z_R0_scratch;
    Register minus_one = Z_R1_scratch;
    __ z_lghi(minus_one, -1);
    __ z_lghi(one, 1);
    __ z_lghi(Z_tos, 0);
    __ z_locgr(Z_tos, one,
               unordered_result == 1 ? Assembler::bcondHighOrNotOrdered : Assembler::bcondHigh);
    __ z_locgr(Z_tos, minus_one,
               unordered_result == 1 ? Assembler::bcondLow : Assembler::bcondLowOrNotOrdered);
  } else {
    // Z_FARG2 == Z_ftos
    __ clear_reg(Z_tos, false, false);
    __ z_bre(done);

    // Z_FARG2 > Z_ftos, or unordered.
    __ z_lhi(Z_tos, 1);
    __ z_brc(unordered_result == 1 ? Assembler::bcondHighOrNotOrdered : Assembler::bcondHigh, done);

    // Z_FARG2 < Z_ftos, or unordered.
    __ z_lhi(Z_tos, -1);

    __ bind(done);
  }
}
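
// lcmp result computation by example (LoadStoreConditional path above):
// for value1 == 5, value2 == 7, z_cgr sets CC to "low", so the bcondHigh
// LOCGR leaves the preloaded 0 untouched and the bcondLow LOCGR overwrites
// it with -1, producing the -1/0/+1 result without branches.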
void TemplateTable::branch(bool is_jsr, bool is_wide) {
  const Register bumped_count = Z_tmp_1;
  const Register method       = Z_tmp_2;
  const Register m_counters   = Z_R1_scratch;
  const Register mdo          = Z_tos;

  BLOCK_COMMENT("TemplateTable::branch {");
  __ get_method(method);
  __ profile_taken_branch(mdo, bumped_count);

  const ByteSize ctr_offset = InvocationCounter::counter_offset();
  const ByteSize be_offset  = MethodCounters::backedge_counter_offset()   + ctr_offset;
  const ByteSize inv_offset = MethodCounters::invocation_counter_offset() + ctr_offset;

  // Get (wide) offset to disp.
  const Register disp = Z_ARG5;
  if (is_wide) {
    __ get_4_byte_integer_at_bcp(disp, 1);
  } else {
    __ get_2_byte_integer_at_bcp(disp, 1, InterpreterMacroAssembler::Signed);
  }

  // Handle all the JSR stuff here, then exit.
  // It's much shorter and cleaner than intermingling with the
  // non-JSR normal-branch stuff occurring below.
  if (is_jsr) {
    // Compute return address as bci in Z_tos.
    __ z_lgr(Z_R1_scratch, Z_bcp);
    __ z_sg(Z_R1_scratch, Address(method, Method::const_offset()));
    __ add2reg(Z_tos, (is_wide ? 5 : 3) - in_bytes(ConstMethod::codes_offset()), Z_R1_scratch);

    // Bump bcp to target of JSR.
    __ z_agr(Z_bcp, disp);
    // Push return address for "ret" on stack.
    __ push_ptr(Z_tos);
    // And away we go!
    __ dispatch_next(vtos, 0, true);
    return;
  }

  // Normal (non-jsr) branch handling.

  // Bump bytecode pointer by displacement (take the branch).
  __ z_agr(Z_bcp, disp);

  assert(UseLoopCounter || !UseOnStackReplacement,
         "on-stack-replacement requires loop counters");

  NearLabel backedge_counter_overflow;
  NearLabel profile_method;
  NearLabel dispatch;
  int increment = InvocationCounter::count_increment;

  if (UseLoopCounter) {
    // Increment backedge counter for backward branches.
    // disp:     target offset
    // Z_bcp:    target bcp
    // Z_locals: locals pointer
    //
    // Count only if backward branch.
    __ compare32_and_branch(disp, (intptr_t)0, Assembler::bcondHigh, dispatch);

    if (TieredCompilation) {
      Label noCounters;

      if (ProfileInterpreter) {
        NearLabel no_mdo;

        // Are we profiling?
        __ load_and_test_long(mdo, Address(method, Method::method_data_offset()));
        __ branch_optimized(Assembler::bcondZero, no_mdo);

        // Increment the MDO backedge counter.
        const Address mdo_backedge_counter(mdo, MethodData::backedge_counter_offset() + InvocationCounter::counter_offset());

        const Address mask(mdo, MethodData::backedge_mask_offset());
        __ increment_mask_and_jump(mdo_backedge_counter, increment, mask,
                                   Z_ARG2, false, Assembler::bcondZero,
                                   UseOnStackReplacement ? &backedge_counter_overflow : NULL);
        __ z_bru(dispatch);
        __ bind(no_mdo);
      }

      // Increment backedge counter in MethodCounters*.
      __ get_method_counters(method, m_counters, noCounters);
      const Address mask(m_counters, MethodCounters::backedge_mask_offset());
      __ increment_mask_and_jump(Address(m_counters, be_offset),
                                 increment, mask,
                                 Z_ARG2, false, Assembler::bcondZero,
                                 UseOnStackReplacement ? &backedge_counter_overflow : NULL);
      __ bind(noCounters);
    } else {
      Register counter = Z_tos;
      Label    noCounters;
      // Get address of MethodCounters object.
      __ get_method_counters(method, m_counters, noCounters);
      // Increment backedge counter.
      __ increment_backedge_counter(m_counters, counter);

      if (ProfileInterpreter) {
        // Test to see if we should create a method data obj.
        __ z_cl(counter, Address(m_counters, MethodCounters::interpreter_profile_limit_offset()));
        __ z_brl(dispatch);

        // If no method data exists, go to profile method.
        __ test_method_data_pointer(Z_ARG4/*result unused*/, profile_method);

        if (UseOnStackReplacement) {
          // Check for overflow against 'bumped_count' which is the MDO taken count.
          __ z_cl(bumped_count, Address(m_counters, MethodCounters::interpreter_backward_branch_limit_offset()));
          __ z_brl(dispatch);

          // When ProfileInterpreter is on, the backedge_count comes
          // from the MethodData, whose value does not get reset on
          // the call to frequency_counter_overflow(). To avoid
          // excessive calls to the overflow routine while the method is
          // being compiled, add a second test to make sure the overflow
          // function is called only once every overflow_frequency.
          const int overflow_frequency = 1024;
          __ and_imm(bumped_count, overflow_frequency - 1);
          __ z_brz(backedge_counter_overflow);
        }
      } else {
        if (UseOnStackReplacement) {
          // Check for overflow against 'counter', which is the sum of the
          // counters.
          __ z_cl(counter, Address(m_counters, MethodCounters::interpreter_backward_branch_limit_offset()));
          __ z_brh(backedge_counter_overflow);
        }
      }
      __ bind(noCounters);
    }

    __ bind(dispatch);
  }
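
  // Counter flow recap for the block above: every backward branch bumps a
  // backedge counter (the MDO one when profiling, else the one in
  // MethodCounters). increment_mask_and_jump applies the overflow mask and,
  // when the masked value reaches zero, branches to backedge_counter_overflow
  // below, which calls InterpreterRuntime::frequency_counter_overflow and
  // may receive an OSR nmethod to jump into.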

void TemplateTable::branch(bool is_jsr, bool is_wide) {
  const Register bumped_count = Z_tmp_1;
  const Register method       = Z_tmp_2;
  const Register m_counters   = Z_R1_scratch;
  const Register mdo          = Z_tos;

  BLOCK_COMMENT("TemplateTable::branch {");
  __ get_method(method);
  __ profile_taken_branch(mdo, bumped_count);

  const ByteSize ctr_offset = InvocationCounter::counter_offset();
  const ByteSize be_offset  = MethodCounters::backedge_counter_offset()   + ctr_offset;
  const ByteSize inv_offset = MethodCounters::invocation_counter_offset() + ctr_offset;

  // Get (wide) offset to disp.
  const Register disp = Z_ARG5;
  if (is_wide) {
    __ get_4_byte_integer_at_bcp(disp, 1);
  } else {
    __ get_2_byte_integer_at_bcp(disp, 1, InterpreterMacroAssembler::Signed);
  }

  // Handle all the JSR stuff here, then exit.
  // It's much shorter and cleaner than intermingling with the
  // non-JSR normal-branch stuff occurring below.
  if (is_jsr) {
    // Compute return address as bci in Z_tos.
    __ z_lgr(Z_R1_scratch, Z_bcp);
    __ z_sg(Z_R1_scratch, Address(method, Method::const_offset()));
    __ add2reg(Z_tos, (is_wide ? 5 : 3) - in_bytes(ConstMethod::codes_offset()), Z_R1_scratch);

    // Bump bcp to target of JSR.
    __ z_agr(Z_bcp, disp);
    // Push return address for "ret" on stack.
    __ push_ptr(Z_tos);
    // And away we go!
    __ dispatch_next(vtos, 0, true);
    return;
  }

  // Normal (non-jsr) branch handling.

  // Bump bytecode pointer by displacement (take the branch).
  __ z_agr(Z_bcp, disp);

  assert(UseLoopCounter || !UseOnStackReplacement,
         "on-stack-replacement requires loop counters");

  NearLabel backedge_counter_overflow;
  NearLabel profile_method;
  NearLabel dispatch;
  int       increment = InvocationCounter::count_increment;

  if (UseLoopCounter) {
    // Increment backedge counter for backward branches.
    // disp:     target offset
    // Z_bcp:    target bcp
    // Z_locals: locals pointer
    //
    // Count only if backward branch.
    __ compare32_and_branch(disp, (intptr_t)0, Assembler::bcondHigh, dispatch);

    if (TieredCompilation) {
      Label noCounters;

      if (ProfileInterpreter) {
        NearLabel no_mdo;

        // Are we profiling?
        __ load_and_test_long(mdo, Address(method, Method::method_data_offset()));
        __ branch_optimized(Assembler::bcondZero, no_mdo);

        // Increment the MDO backedge counter.
        const Address mdo_backedge_counter(mdo, MethodData::backedge_counter_offset() + InvocationCounter::counter_offset());

        const Address mask(mdo, MethodData::backedge_mask_offset());
        __ increment_mask_and_jump(mdo_backedge_counter, increment, mask,
                                   Z_ARG2, false, Assembler::bcondZero,
                                   UseOnStackReplacement ? &backedge_counter_overflow : NULL);
        __ z_bru(dispatch);
        __ bind(no_mdo);
      }

      // Increment backedge counter in MethodCounters*.
      __ get_method_counters(method, m_counters, noCounters);
      const Address mask(m_counters, MethodCounters::backedge_mask_offset());
      __ increment_mask_and_jump(Address(m_counters, be_offset),
                                 increment, mask,
                                 Z_ARG2, false, Assembler::bcondZero,
                                 UseOnStackReplacement ? &backedge_counter_overflow : NULL);
      __ bind(noCounters);
    } else {
      Register counter = Z_tos;
      Label    noCounters;
      // Get address of MethodCounters object.
      __ get_method_counters(method, m_counters, noCounters);
      // Increment backedge counter.
      __ increment_backedge_counter(m_counters, counter);

      if (ProfileInterpreter) {
        // Test to see if we should create a method data obj.
        __ z_cl(counter, Address(m_counters, MethodCounters::interpreter_profile_limit_offset()));
        __ z_brl(dispatch);

        // If no method data exists, go to profile method.
        __ test_method_data_pointer(Z_ARG4/*result unused*/, profile_method);

        if (UseOnStackReplacement) {
          // Check for overflow against 'bumped_count', which is the MDO taken count.
          __ z_cl(bumped_count, Address(m_counters, MethodCounters::interpreter_backward_branch_limit_offset()));
          __ z_brl(dispatch);

          // When ProfileInterpreter is on, the backedge_count comes
          // from the methodDataOop, whose value does not get reset on
          // the call to frequency_counter_overflow(). To avoid
          // excessive calls to the overflow routine while the method is
          // being compiled, add a second test to make sure the overflow
          // function is called only once every overflow_frequency.
          const int overflow_frequency = 1024;
          __ and_imm(bumped_count, overflow_frequency - 1);
          __ z_brz(backedge_counter_overflow);
        }
      } else {
        if (UseOnStackReplacement) {
          // Check for overflow against 'counter', which is the sum of the
          // counters.
          __ z_cl(counter, Address(m_counters, MethodCounters::interpreter_backward_branch_limit_offset()));
          __ z_brh(backedge_counter_overflow);
        }
      }
      __ bind(noCounters);
    }

    __ bind(dispatch);
  }

  // Pre-load the next target bytecode into Z_bytecode.
  __ z_llgc(Z_bytecode, Address(Z_bcp, (intptr_t) 0));

  // Continue with the bytecode @ target.
  // Z_tos:      Return bci for jsr's, unused otherwise.
  // Z_bytecode: target bytecode
  // Z_bcp:      target bcp
  __ dispatch_only(vtos, true);

  // Out-of-line code runtime calls.
  if (UseLoopCounter) {
    if (ProfileInterpreter) {
      // Out-of-line code to allocate method data oop.
      __ bind(profile_method);

      __ call_VM(noreg,
                 CAST_FROM_FN_PTR(address, InterpreterRuntime::profile_method));
      __ z_llgc(Z_bytecode, Address(Z_bcp, (intptr_t) 0)); // Restore target bytecode.
      __ set_method_data_pointer_for_bcp();
      __ z_bru(dispatch);
    }

    if (UseOnStackReplacement) {

      // Backedge counter overflow.
      __ bind(backedge_counter_overflow);

      __ z_lcgr(Z_ARG2, disp); // Z_ARG2 := -disp
      __ z_agr(Z_ARG2, Z_bcp); // Z_ARG2 := branch target bcp - disp == branch bcp
      __ call_VM(noreg,
                 CAST_FROM_FN_PTR(address, InterpreterRuntime::frequency_counter_overflow),
                 Z_ARG2);

      // Z_RET: osr nmethod (osr ok) or NULL (osr not possible).
      __ compare64_and_branch(Z_RET, (intptr_t) 0, Assembler::bcondEqual, dispatch);

      // Nmethod may have been invalidated (VM may block upon call_VM return).
      __ z_cliy(nmethod::state_offset(), Z_RET, nmethod::in_use);
      __ z_brne(dispatch);

      // Migrate the interpreter frame off of the stack.

      __ z_lgr(Z_tmp_1, Z_RET); // Save the nmethod.

      call_VM(noreg,
              CAST_FROM_FN_PTR(address, SharedRuntime::OSR_migration_begin));

      // Z_RET is OSR buffer, move it to expected parameter location.
      __ lgr_if_needed(Z_ARG1, Z_RET);

      // Pop the interpreter frame ...
      __ pop_interpreter_frame(Z_R14, Z_ARG2/*tmp1*/, Z_ARG3/*tmp2*/);

      // ... and begin the OSR nmethod.
      __ z_lg(Z_R1_scratch, Address(Z_tmp_1, nmethod::osr_entry_point_offset()));
      __ z_br(Z_R1_scratch);
    }
  }
  BLOCK_COMMENT("} TemplateTable::branch");
}
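
// For reference: control flow of the UseLoopCounter path above, as pseudocode
// (illustrative only):
//
//   if (disp <= 0) {                          // count backward branches only
//     increment backedge counter (MDO if present, else MethodCounters);
//     if (counter overflowed && UseOnStackReplacement) {
//       nm = InterpreterRuntime::frequency_counter_overflow(branch_bcp);
//       if (nm != NULL && nm->is_in_use()) {
//         // Migrate the frame off the stack and enter the OSR nmethod.
//       }
//     }
//   }
//   dispatch at the target bcp;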

void TemplateTable::if_0cmp(Condition cc) {
  transition(itos, vtos);

  // Assume branch is more often taken than not (loops use backward branches).
  NearLabel not_taken;
  __ compare32_and_branch(Z_tos, (intptr_t) 0, j_not(cc), not_taken);
  branch(false, false);
  __ bind(not_taken);
  __ profile_not_taken_branch(Z_tos);
}

void TemplateTable::if_icmp(Condition cc) {
  transition(itos, vtos);

  // Assume branch is more often taken than not (loops use backward branches).
  NearLabel not_taken;
  __ pop_i(Z_R0_scratch);
  __ compare32_and_branch(Z_R0_scratch, Z_tos, j_not(cc), not_taken);
  branch(false, false);
  __ bind(not_taken);
  __ profile_not_taken_branch(Z_tos);
}

void TemplateTable::if_nullcmp(Condition cc) {
  transition(atos, vtos);

  // Assume branch is more often taken than not (loops use backward branches).
  NearLabel not_taken;
  __ compare64_and_branch(Z_tos, (intptr_t) 0, j_not(cc), not_taken);
  branch(false, false);
  __ bind(not_taken);
  __ profile_not_taken_branch(Z_tos);
}

void TemplateTable::if_acmp(Condition cc) {
  transition(atos, vtos);
  // Assume branch is more often taken than not (loops use backward branches).
  NearLabel not_taken;
  __ pop_ptr(Z_ARG2);
  __ verify_oop(Z_ARG2);
  __ verify_oop(Z_tos);
  __ compareU64_and_branch(Z_tos, Z_ARG2, j_not(cc), not_taken);
  branch(false, false);
  __ bind(not_taken);
  __ profile_not_taken_branch(Z_ARG3);
}

void TemplateTable::ret() {
  transition(vtos, vtos);

  locals_index(Z_tmp_1);
  // Get return bci, compute return bcp. Must load 64 bits.
  __ mem2reg_opt(Z_tmp_1, iaddress(_masm, Z_tmp_1));
  __ profile_ret(Z_tmp_1, Z_tmp_2);
  __ get_method(Z_tos);
  __ mem2reg_opt(Z_R1_scratch, Address(Z_tos, Method::const_offset()));
  __ load_address(Z_bcp, Address(Z_R1_scratch, Z_tmp_1, ConstMethod::codes_offset()));
  __ dispatch_next(vtos, 0, true);
}

void TemplateTable::wide_ret() {
  transition(vtos, vtos);

  locals_index_wide(Z_tmp_1);
  // Get return bci, compute return bcp.
  __ mem2reg_opt(Z_tmp_1, aaddress(_masm, Z_tmp_1));
  __ profile_ret(Z_tmp_1, Z_tmp_2);
  __ get_method(Z_tos);
  __ mem2reg_opt(Z_R1_scratch, Address(Z_tos, Method::const_offset()));
  __ load_address(Z_bcp, Address(Z_R1_scratch, Z_tmp_1, ConstMethod::codes_offset()));
  __ dispatch_next(vtos, 0, true);
}
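
// For reference: both ret flavors above recompute the continuation bcp from
// the bci saved in the local variable (sketch, illustrative only):
//
//   Z_bcp = method->constMethod() + ConstMethod::codes_offset() + locals[index];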

void TemplateTable::tableswitch() {
  transition(itos, vtos);

  NearLabel default_case, continue_execution;
  Register  bcp = Z_ARG5;
  // Align bcp.
  __ load_address(bcp, at_bcp(BytesPerInt));
  __ z_nill(bcp, (-BytesPerInt) & 0xffff);

  // Load lo & hi.
  Register low  = Z_tmp_1;
  Register high = Z_tmp_2;

  // Load low into 64 bits, since used for address calculation.
  __ mem2reg_signed_opt(low, Address(bcp, BytesPerInt));
  __ mem2reg_opt(high, Address(bcp, 2 * BytesPerInt), false);
  // Sign extend "label" value for address calculation.
  __ z_lgfr(Z_tos, Z_tos);

  // Check against lo & hi.
  __ compare32_and_branch(Z_tos, low, Assembler::bcondLow, default_case);
  __ compare32_and_branch(Z_tos, high, Assembler::bcondHigh, default_case);

  // Lookup dispatch offset.
  __ z_sgr(Z_tos, low);
  Register jump_table_offset = Z_ARG3;
  // Index2offset; index in Z_tos is killed by profile_switch_case.
  __ z_sllg(jump_table_offset, Z_tos, LogBytesPerInt);
  __ profile_switch_case(Z_tos, Z_ARG4 /*tmp for mdp*/, low/*tmp*/, Z_bytecode/*tmp*/);

  Register index = Z_tmp_2;

  // Load index sign extended for addressing.
  __ mem2reg_signed_opt(index, Address(bcp, jump_table_offset, 3 * BytesPerInt));

  // Continue execution.
  __ bind(continue_execution);

  // Load next bytecode.
  __ z_llgc(Z_bytecode, Address(Z_bcp, index));
  __ z_agr(Z_bcp, index); // Advance bcp.
  __ dispatch_only(vtos, true);

  // Handle default.
  __ bind(default_case);

  __ profile_switch_default(Z_tos);
  __ mem2reg_signed_opt(index, Address(bcp));
  __ z_bru(continue_execution);
}
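
// For reference: operand layout consumed by tableswitch above, with 'bcp'
// advanced past the opcode and pad bytes to the next 4-byte boundary
// (JVM spec; illustrative sketch):
//
//   [bcp +  0] default offset
//   [bcp +  4] low
//   [bcp +  8] high
//   [bcp + 12] jump offsets[high - low + 1]
//
//   target_offset = (key < low || key > high) ? default : table[key - low];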

void TemplateTable::lookupswitch() {
  transition(itos, itos);
  __ stop("lookupswitch bytecode should have been rewritten");
}

void TemplateTable::fast_linearswitch() {
  transition(itos, vtos);

  Label loop_entry, loop, found, continue_execution;
  Register bcp = Z_ARG5;

  // Align bcp.
  __ load_address(bcp, at_bcp(BytesPerInt));
  __ z_nill(bcp, (-BytesPerInt) & 0xffff);

  // Start search with last case.
  Register current_case_offset = Z_tmp_1;

  __ mem2reg_signed_opt(current_case_offset, Address(bcp, BytesPerInt));
  __ z_sllg(current_case_offset, current_case_offset, LogBytesPerWord); // index2bytes
  __ z_bru(loop_entry);

  // table search
  __ bind(loop);

  __ z_c(Z_tos, Address(bcp, current_case_offset, 2 * BytesPerInt));
  __ z_bre(found);

  __ bind(loop_entry);
  __ z_aghi(current_case_offset, -2 * BytesPerInt); // Decrement.
  __ z_brnl(loop);

  // default case
  Register offset = Z_tmp_2;

  __ profile_switch_default(Z_tos);
  // Load offset sign extended for addressing.
  __ mem2reg_signed_opt(offset, Address(bcp));
  __ z_bru(continue_execution);

  // Entry found -> get offset.
  __ bind(found);
  __ mem2reg_signed_opt(offset, Address(bcp, current_case_offset, 3 * BytesPerInt));
  // Profile that this case was taken.
  Register current_case_idx = Z_ARG4;
  __ z_srlg(current_case_idx, current_case_offset, LogBytesPerWord); // bytes2index
  __ profile_switch_case(current_case_idx, Z_tos, bcp, Z_bytecode);

  // Continue execution.
  __ bind(continue_execution);

  // Load next bytecode.
  __ z_llgc(Z_bytecode, Address(Z_bcp, offset, 0));
  __ z_agr(Z_bcp, offset); // Advance bcp.
  __ dispatch_only(vtos, true);
}


void TemplateTable::fast_binaryswitch() {

  transition(itos, vtos);

  // Implementation using the following core algorithm:
  //
  // int binary_search(int key, LookupswitchPair* array, int n) {
  //   // Binary search according to "Methodik des Programmierens" by
  //   // Edsger W. Dijkstra and W.H.J. Feijen, Addison Wesley Germany 1985.
  //   int i = 0;
  //   int j = n;
  //   while (i+1 < j) {
  //     // invariant P: 0 <= i < j <= n and (a[i] <= key < a[j] or Q)
  //     // with      Q: for all i: 0 <= i < n: key < a[i]
  //     // where a stands for the array and assuming that the (nonexistent)
  //     // element a[n] is infinitely big.
  //     int h = (i + j) >> 1;
  //     // i < h < j
  //     if (key < array[h].fast_match()) {
  //       j = h;
  //     } else {
  //       i = h;
  //     }
  //   }
  //   // R: a[i] <= key < a[i+1] or Q
  //   // (i.e., if key is within array, i is the correct index)
  //   return i;
  // }

  // Register allocation
  // Note: Since we use the indices in address operands, we do all the
  // computation in 64 bits.
  const Register key   = Z_tos; // Already set (tosca).
  const Register array = Z_tmp_1;
  const Register i     = Z_tmp_2;
  const Register j     = Z_ARG5;
  const Register h     = Z_ARG4;
  const Register temp  = Z_R1_scratch;

  // Find array start.
  __ load_address(array, at_bcp(3 * BytesPerInt));
  __ z_nill(array, (-BytesPerInt) & 0xffff); // align

  // Initialize i & j.
  __ clear_reg(i, true, false); // i = 0; Don't set CC.
  __ mem2reg_signed_opt(j, Address(array, -BytesPerInt)); // j = length(array);

  // And start.
  Label entry;
  __ z_bru(entry);

  // binary search loop
  {
    NearLabel loop;

    __ bind(loop);

    // int h = (i + j) >> 1;
    __ add2reg_with_index(h, 0, i, j); // h = i + j;
    __ z_srag(h, h, 1);                // h = (i + j) >> 1;

    // if (key < array[h].fast_match()) {
    //   j = h;
    // } else {
    //   i = h;
    // }

    // Convert array[h].match to native byte-ordering before compare.
    __ z_sllg(temp, h, LogBytesPerWord); // index2bytes
    __ mem2reg_opt(temp, Address(array, temp), false);

    NearLabel else_;

    __ compare32_and_branch(key, temp, Assembler::bcondNotLow, else_);
    // j = h if (key < array[h].fast_match())
    __ z_lgr(j, h);
    __ z_bru(entry); // continue

    __ bind(else_);

    // i = h if (key >= array[h].fast_match())
    __ z_lgr(i, h); // and fallthrough

    // while (i+1 < j)
    __ bind(entry);

    // if (i + 1 < j) continue search
    __ add2reg(h, 1, i);
    __ compare64_and_branch(h, j, Assembler::bcondLow, loop);
  }

  // End of binary search, result index is i (must check again!).
  NearLabel default_case;

  // h is no longer needed, so use it to hold the byte offset.
  __ z_sllg(h, i, LogBytesPerWord); // index2bytes
  __ mem2reg_opt(temp, Address(array, h), false);
  __ compare32_and_branch(key, temp, Assembler::bcondNotEqual, default_case);

  // entry found -> j = offset
  __ mem2reg_signed_opt(j, Address(array, h, BytesPerInt));
  __ profile_switch_case(i, key, array, Z_bytecode);
  // Load next bytecode.
  __ z_llgc(Z_bytecode, Address(Z_bcp, j));
  __ z_agr(Z_bcp, j); // Advance bcp.
  __ dispatch_only(vtos, true);

  // default case -> j = default offset
  __ bind(default_case);

  __ profile_switch_default(i);
  __ mem2reg_signed_opt(j, Address(array, -2 * BytesPerInt));
  // Load next bytecode.
  __ z_llgc(Z_bytecode, Address(Z_bcp, j));
  __ z_agr(Z_bcp, j); // Advance bcp.
  __ dispatch_only(vtos, true);
}
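
// For reference: the operand layout assumed by fast_binaryswitch above, with
// 'array' pointing at the first match/offset pair (JVM spec; sketch only):
//
//   [array - 2*BytesPerInt] default offset
//   [array -   BytesPerInt] npairs
//   [array + 8*i + 0]       match[i]   (pairs sorted by match value)
//   [array + 8*i + 4]       offset[i]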

void TemplateTable::_return(TosState state) {
  transition(state, state);
  assert(_desc->calls_vm(),
         "inconsistent calls_vm information"); // call in remove_activation

  if (_desc->bytecode() == Bytecodes::_return_register_finalizer) {
    Register Rthis  = Z_ARG2;
    Register Rklass = Z_ARG5;
    Label skip_register_finalizer;
    assert(state == vtos, "only valid state");
    __ z_lg(Rthis, aaddress(0));
    __ load_klass(Rklass, Rthis);
    __ testbit(Address(Rklass, Klass::access_flags_offset()), exact_log2(JVM_ACC_HAS_FINALIZER));
    __ z_bfalse(skip_register_finalizer);
    __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::register_finalizer), Rthis);
    __ bind(skip_register_finalizer);
  }

  if (SafepointMechanism::uses_thread_local_poll() && _desc->bytecode() != Bytecodes::_return_register_finalizer) {
    Label no_safepoint;
    const Address poll_byte_addr(Z_thread, in_bytes(Thread::polling_page_offset()) + 7 /* Big Endian */);
    __ z_tm(poll_byte_addr, SafepointMechanism::poll_bit());
    __ z_braz(no_safepoint);
    __ push(state);
    __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::at_safepoint));
    __ pop(state);
    __ bind(no_safepoint);
  }

  if (state == itos) {
    // Narrow result if state is itos but result type is smaller.
    // Need to narrow in the return bytecode rather than in generate_return_entry
    // since compiled code callers expect the result to already be narrowed.
    __ narrow(Z_tos, Z_tmp_1); /* fall through */
  }

  __ remove_activation(state, Z_R14);
  __ z_br(Z_R14);
}

// ----------------------------------------------------------------------------
// NOTE: Cpe_offset is already computed as byte offset, so we must not
// shift it afterwards!
void TemplateTable::resolve_cache_and_index(int byte_no,
                                            Register Rcache,
                                            Register cpe_offset,
                                            size_t index_size) {
  BLOCK_COMMENT("resolve_cache_and_index {");
  NearLabel      resolved;
  const Register bytecode_in_cpcache = Z_R1_scratch;
  const int      total_f1_offset = in_bytes(ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::f1_offset());
  assert_different_registers(Rcache, cpe_offset, bytecode_in_cpcache);

  Bytecodes::Code code = bytecode();
  switch (code) {
    case Bytecodes::_nofast_getfield: code = Bytecodes::_getfield; break;
    case Bytecodes::_nofast_putfield: code = Bytecodes::_putfield; break;
  }

  {
    assert(byte_no == f1_byte || byte_no == f2_byte, "byte_no out of range");
    __ get_cache_and_index_and_bytecode_at_bcp(Rcache, cpe_offset, bytecode_in_cpcache, byte_no, 1, index_size);
    // Have we resolved this bytecode?
    __ compare32_and_branch(bytecode_in_cpcache, (int)code, Assembler::bcondEqual, resolved);
  }

  // Resolve first time through.
  address entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_from_cache);
  __ load_const_optimized(Z_ARG2, (int)code);
  __ call_VM(noreg, entry, Z_ARG2);

  // Update registers with resolved info.
  __ get_cache_and_index_at_bcp(Rcache, cpe_offset, 1, index_size);
  __ bind(resolved);
  BLOCK_COMMENT("} resolve_cache_and_index");
}
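
// For reference: what resolve_cache_and_index generates, as pseudocode
// (illustrative only):
//
//   if (cp_cache_entry->indices[byte_no] != bytecode()) {
//     InterpreterRuntime::resolve_from_cache(bytecode());  // via call_VM
//   }
//   // Rcache/cpe_offset now address the resolved entry.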

// The Rcache and index registers must be set before call.
// Index is already a byte offset, don't shift!
void TemplateTable::load_field_cp_cache_entry(Register obj,
                                              Register cache,
                                              Register index,
                                              Register off,
                                              Register flags,
                                              bool is_static = false) {
  assert_different_registers(cache, index, flags, off);
  ByteSize cp_base_offset = ConstantPoolCache::base_offset();

  // Field offset
  __ mem2reg_opt(off, Address(cache, index, cp_base_offset + ConstantPoolCacheEntry::f2_offset()));
  // Flags. Must load 64 bits.
  __ mem2reg_opt(flags, Address(cache, index, cp_base_offset + ConstantPoolCacheEntry::flags_offset()));

  // klass overwrite register
  if (is_static) {
    __ mem2reg_opt(obj, Address(cache, index, cp_base_offset + ConstantPoolCacheEntry::f1_offset()));
    __ mem2reg_opt(obj, Address(obj, Klass::java_mirror_offset()));
    __ resolve_oop_handle(obj);
  }
}

void TemplateTable::load_invoke_cp_cache_entry(int byte_no,
                                               Register method,
                                               Register itable_index,
                                               Register flags,
                                               bool is_invokevirtual,
                                               bool is_invokevfinal, // unused
                                               bool is_invokedynamic) {
  BLOCK_COMMENT("load_invoke_cp_cache_entry {");
  // Setup registers.
  const Register cache      = Z_ARG1;
  const Register cpe_offset = flags;
  const ByteSize base_off   = ConstantPoolCache::base_offset();
  const ByteSize f1_off     = ConstantPoolCacheEntry::f1_offset();
  const ByteSize f2_off     = ConstantPoolCacheEntry::f2_offset();
  const ByteSize flags_off  = ConstantPoolCacheEntry::flags_offset();
  const int method_offset   = in_bytes(base_off + ((byte_no == f2_byte) ? f2_off : f1_off));
  const int flags_offset    = in_bytes(base_off + flags_off);
  // Access constant pool cache fields.
  const int index_offset    = in_bytes(base_off + f2_off);

  assert_different_registers(method, itable_index, flags, cache);
  assert(is_invokevirtual == (byte_no == f2_byte), "is_invokevirtual flag redundant");

  if (is_invokevfinal) {
    // Already resolved.
    assert(itable_index == noreg, "register not used");
    __ get_cache_and_index_at_bcp(cache, cpe_offset, 1);
  } else {
    // Need to resolve.
    resolve_cache_and_index(byte_no, cache, cpe_offset, is_invokedynamic ? sizeof(u4) : sizeof(u2));
  }
  __ z_lg(method, Address(cache, cpe_offset, method_offset));

  if (itable_index != noreg) {
    __ z_lg(itable_index, Address(cache, cpe_offset, index_offset));
  }

  // Only load the lower 4 bytes and fill high bytes of flags with zeros.
  // Callers depend on this zero-extension!!!
  // Attention: overwrites cpe_offset == flags
  __ z_llgf(flags, Address(cache, cpe_offset, flags_offset + (BytesPerLong - BytesPerInt)));

  BLOCK_COMMENT("} load_invoke_cp_cache_entry");
}
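
// For reference: the ConstantPoolCacheEntry fields used above (sketch):
//   f1    - metadata, e.g. the field holder Klass* (static fields) or a Method*
//   f2    - field offset, vtable/itable index, or Method*, depending on usage
//   flags - tos state, parameter size, and attribute bits (volatile, final,
//           vfinal, has_appendix, ...)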

// The registers cache and index are expected to be set before the call.
// Correct values of the cache and index registers are preserved.
void TemplateTable::jvmti_post_field_access(Register cache, Register index,
                                            bool is_static, bool has_tos) {

  // Do the JVMTI work here to avoid disturbing the register state below.
  // We use the Z_ARG registers here because they are also the registers
  // used for the VM call.
  if (!JvmtiExport::can_post_field_access()) {
    return;
  }

  // Check to see if a field access watch has been set before we
  // take the time to call into the VM.
  Label exit;
  assert_different_registers(cache, index, Z_tos);
  __ load_absolute_address(Z_tos, (address)JvmtiExport::get_field_access_count_addr());
  __ load_and_test_int(Z_R0, Address(Z_tos));
  __ z_brz(exit);

  // Index is returned as byte offset, do not shift!
  __ get_cache_and_index_at_bcp(Z_ARG3, Z_R1_scratch, 1);

  // cache entry pointer
  __ add2reg_with_index(Z_ARG3,
                        in_bytes(ConstantPoolCache::base_offset()),
                        Z_ARG3, Z_R1_scratch);

  if (is_static) {
    __ clear_reg(Z_ARG2, true, false); // NULL object reference. Don't set CC.
  } else {
    __ mem2reg_opt(Z_ARG2, at_tos()); // Get object pointer without popping it.
    __ verify_oop(Z_ARG2);
  }
  // Z_ARG2: object pointer or NULL
  // Z_ARG3: cache entry pointer
  __ call_VM(noreg,
             CAST_FROM_FN_PTR(address, InterpreterRuntime::post_field_access),
             Z_ARG2, Z_ARG3);
  __ get_cache_and_index_at_bcp(cache, index, 1);

  __ bind(exit);
}

void TemplateTable::pop_and_check_object(Register r) {
  __ pop_ptr(r);
  __ null_check(r); // For field access, must check obj.
  __ verify_oop(r);
}

void TemplateTable::getfield_or_static(int byte_no, bool is_static, RewriteControl rc) {
  transition(vtos, vtos);

  const Register cache = Z_tmp_1;
  const Register index = Z_tmp_2;
  const Register obj   = Z_tmp_1;
  const Register off   = Z_ARG2;
  const Register flags = Z_ARG1;
  const Register bc    = Z_tmp_1; // Uses same reg as obj, so don't mix them.

  resolve_cache_and_index(byte_no, cache, index, sizeof(u2));
  jvmti_post_field_access(cache, index, is_static, false);
  load_field_cp_cache_entry(obj, cache, index, off, flags, is_static);

  if (!is_static) {
    // Obj is on the stack.
    pop_and_check_object(obj);
  }

  // Displacement is 0, so any load instruction will be fine on any CPU.
  const Address field(obj, off);

  Label is_Byte, is_Bool, is_Int, is_Short, is_Char,
        is_Long, is_Float, is_Object, is_Double;
  Label is_badState8, is_badState9, is_badStateA, is_badStateB,
        is_badStateC, is_badStateD, is_badStateE, is_badStateF,
        is_badState;
  Label branchTable, atosHandler, Done;
  Register br_tab       = Z_R1_scratch;
  bool     do_rewrite   = !is_static && (rc == may_rewrite);
  bool     dont_rewrite = (is_static || (rc == may_not_rewrite));

  assert(do_rewrite == !dont_rewrite, "Oops, code is not fit for that");
  assert(btos == 0, "change code, btos != 0");

  // Calculate branch table size. Generated code size depends on ASSERT and on bytecode rewriting.
#ifdef ASSERT
  const unsigned int bsize = dont_rewrite ? BTB_MINSIZE*1 : BTB_MINSIZE*4;
#else
  const unsigned int bsize = dont_rewrite ? BTB_MINSIZE*1 : BTB_MINSIZE*4;
#endif

  // Calculate address of branch table entry and branch there.
  {
    const int bit_shift = exact_log2(bsize); // Size of each branch table entry.
    const int r_bitpos  = 63 - bit_shift;
    const int l_bitpos  = r_bitpos - ConstantPoolCacheEntry::tos_state_bits + 1;
    const int n_rotate  = (bit_shift - ConstantPoolCacheEntry::tos_state_shift);
    __ z_larl(br_tab, branchTable);
    __ rotate_then_insert(flags, flags, l_bitpos, r_bitpos, n_rotate, true);
  }
  __ z_bc(Assembler::bcondAlways, 0, flags, br_tab);
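
  // For reference: the calculated branch above implements (sketch only):
  //
  //   goto branchTable[tos_state(flags)];   // entries are bsize bytes apart
  //
  // rotate_then_insert extracts the tos_state bits from 'flags' and scales
  // them by the branch table block size in one instruction.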

  __ align_address(bsize);
  BIND(branchTable);

  // btos
  BTB_BEGIN(is_Byte, bsize, "getfield_or_static:is_Byte");
  __ z_lb(Z_tos, field);
  __ push(btos);
  // Rewrite bytecode to be faster.
  if (do_rewrite) {
    patch_bytecode(Bytecodes::_fast_bgetfield, bc, Z_ARG5);
  }
  __ z_bru(Done);
  BTB_END(is_Byte, bsize, "getfield_or_static:is_Byte");

  // ztos
  BTB_BEGIN(is_Bool, bsize, "getfield_or_static:is_Bool");
  __ z_lb(Z_tos, field);
  __ push(ztos);
  // Rewrite bytecode to be faster.
  if (do_rewrite) {
    // Use btos rewriting, no truncating to t/f bit is needed for getfield.
    patch_bytecode(Bytecodes::_fast_bgetfield, bc, Z_ARG5);
  }
  __ z_bru(Done);
  BTB_END(is_Bool, bsize, "getfield_or_static:is_Bool");

  // ctos
  BTB_BEGIN(is_Char, bsize, "getfield_or_static:is_Char");
  // Load into 64 bits, works on all CPUs.
  __ z_llgh(Z_tos, field);
  __ push(ctos);
  // Rewrite bytecode to be faster.
  if (do_rewrite) {
    patch_bytecode(Bytecodes::_fast_cgetfield, bc, Z_ARG5);
  }
  __ z_bru(Done);
  BTB_END(is_Char, bsize, "getfield_or_static:is_Char");

  // stos
  BTB_BEGIN(is_Short, bsize, "getfield_or_static:is_Short");
  __ z_lh(Z_tos, field);
  __ push(stos);
  // Rewrite bytecode to be faster.
  if (do_rewrite) {
    patch_bytecode(Bytecodes::_fast_sgetfield, bc, Z_ARG5);
  }
  __ z_bru(Done);
  BTB_END(is_Short, bsize, "getfield_or_static:is_Short");

  // itos
  BTB_BEGIN(is_Int, bsize, "getfield_or_static:is_Int");
  __ mem2reg_opt(Z_tos, field, false);
  __ push(itos);
  // Rewrite bytecode to be faster.
  if (do_rewrite) {
    patch_bytecode(Bytecodes::_fast_igetfield, bc, Z_ARG5);
  }
  __ z_bru(Done);
  BTB_END(is_Int, bsize, "getfield_or_static:is_Int");

  // ltos
  BTB_BEGIN(is_Long, bsize, "getfield_or_static:is_Long");
  __ mem2reg_opt(Z_tos, field);
  __ push(ltos);
  // Rewrite bytecode to be faster.
  if (do_rewrite) {
    patch_bytecode(Bytecodes::_fast_lgetfield, bc, Z_ARG5);
  }
  __ z_bru(Done);
  BTB_END(is_Long, bsize, "getfield_or_static:is_Long");

  // ftos
  BTB_BEGIN(is_Float, bsize, "getfield_or_static:is_Float");
  __ mem2freg_opt(Z_ftos, field, false);
  __ push(ftos);
  // Rewrite bytecode to be faster.
  if (do_rewrite) {
    patch_bytecode(Bytecodes::_fast_fgetfield, bc, Z_ARG5);
  }
  __ z_bru(Done);
  BTB_END(is_Float, bsize, "getfield_or_static:is_Float");

  // dtos
  BTB_BEGIN(is_Double, bsize, "getfield_or_static:is_Double");
  __ mem2freg_opt(Z_ftos, field);
  __ push(dtos);
  // Rewrite bytecode to be faster.
  if (do_rewrite) {
    patch_bytecode(Bytecodes::_fast_dgetfield, bc, Z_ARG5);
  }
  __ z_bru(Done);
  BTB_END(is_Double, bsize, "getfield_or_static:is_Double");
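
  // Note on the rewriting above: when do_rewrite is set, the first execution
  // patches the bytecode in the code stream to its type-specific fast variant
  // (e.g. getfield -> _fast_igetfield), so subsequent executions dispatch
  // directly to the resolved access path and skip the resolution and
  // branch-table work (sketch of intent, not generated code).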

  // atos
  BTB_BEGIN(is_Object, bsize, "getfield_or_static:is_Object");
  __ z_bru(atosHandler);
  BTB_END(is_Object, bsize, "getfield_or_static:is_Object");

  // Bad state detection comes at no extra runtime cost.
  BTB_BEGIN(is_badState8, bsize, "getfield_or_static:is_badState8");
  __ z_illtrap();
  __ z_bru(is_badState);
  BTB_END( is_badState8, bsize, "getfield_or_static:is_badState8");
  BTB_BEGIN(is_badState9, bsize, "getfield_or_static:is_badState9");
  __ z_illtrap();
  __ z_bru(is_badState);
  BTB_END( is_badState9, bsize, "getfield_or_static:is_badState9");
  BTB_BEGIN(is_badStateA, bsize, "getfield_or_static:is_badStateA");
  __ z_illtrap();
  __ z_bru(is_badState);
  BTB_END( is_badStateA, bsize, "getfield_or_static:is_badStateA");
  BTB_BEGIN(is_badStateB, bsize, "getfield_or_static:is_badStateB");
  __ z_illtrap();
  __ z_bru(is_badState);
  BTB_END( is_badStateB, bsize, "getfield_or_static:is_badStateB");
  BTB_BEGIN(is_badStateC, bsize, "getfield_or_static:is_badStateC");
  __ z_illtrap();
  __ z_bru(is_badState);
  BTB_END( is_badStateC, bsize, "getfield_or_static:is_badStateC");
  BTB_BEGIN(is_badStateD, bsize, "getfield_or_static:is_badStateD");
  __ z_illtrap();
  __ z_bru(is_badState);
  BTB_END( is_badStateD, bsize, "getfield_or_static:is_badStateD");
  BTB_BEGIN(is_badStateE, bsize, "getfield_or_static:is_badStateE");
  __ z_illtrap();
  __ z_bru(is_badState);
  BTB_END( is_badStateE, bsize, "getfield_or_static:is_badStateE");
  BTB_BEGIN(is_badStateF, bsize, "getfield_or_static:is_badStateF");
  __ z_illtrap();
  __ z_bru(is_badState);
  BTB_END( is_badStateF, bsize, "getfield_or_static:is_badStateF");

  __ align_address(64);
  BIND(is_badState);  // Do this outside branch table. Needs a lot of space.
  {
    unsigned int b_off = __ offset();
    if (is_static) {
      __ stop_static("Bad state in getstatic");
    } else {
      __ stop_static("Bad state in getfield");
    }
    unsigned int e_off = __ offset();
  }

  __ align_address(64);
  BIND(atosHandler);  // Oops are really complicated to handle.
                      // There is a lot of code generated.
                      // Therefore: generate the handler outside of branch table.
                      // There is no performance penalty. The additional branch
                      // to here is compensated for by the fallthru to "Done".
  {
    unsigned int b_off = __ offset();
    __ load_heap_oop(Z_tos, field);
    __ verify_oop(Z_tos);
    __ push(atos);
    if (do_rewrite) {
      patch_bytecode(Bytecodes::_fast_agetfield, bc, Z_ARG5);
    }
    unsigned int e_off = __ offset();
  }

  BIND(Done);
}

void TemplateTable::getfield(int byte_no) {
  BLOCK_COMMENT("getfield {");
  getfield_or_static(byte_no, false);
  BLOCK_COMMENT("} getfield");
}

void TemplateTable::nofast_getfield(int byte_no) {
  getfield_or_static(byte_no, false, may_not_rewrite);
}

void TemplateTable::getstatic(int byte_no) {
  BLOCK_COMMENT("getstatic {");
  getfield_or_static(byte_no, true);
  BLOCK_COMMENT("} getstatic");
}

// The registers cache and index are expected to be set before the call.
// The function may destroy various registers, just not the cache and
// index registers.
void TemplateTable::jvmti_post_field_mod(Register cache,
                                         Register index, bool is_static) {
  transition(vtos, vtos);

  if (!JvmtiExport::can_post_field_modification()) {
    return;
  }

  BLOCK_COMMENT("jvmti_post_field_mod {");

  // Check to see if a field modification watch has been set before
  // we take the time to call into the VM.
  Label    L1;
  ByteSize cp_base_offset = ConstantPoolCache::base_offset();
  assert_different_registers(cache, index, Z_tos);

  __ load_absolute_address(Z_tos, (address)JvmtiExport::get_field_modification_count_addr());
  __ load_and_test_int(Z_R0, Address(Z_tos));
  __ z_brz(L1);

  // Index is returned as byte offset, do not shift!
  __ get_cache_and_index_at_bcp(Z_ARG3, Z_R1_scratch, 1);

  if (is_static) {
    // Life is simple. Null out the object pointer.
    __ clear_reg(Z_ARG2, true, false); // Don't set CC.
  } else {
    // Life is harder. The stack holds the value on top, followed by
    // the object. We don't know the size of the value, though. It
    // could be one or two words depending on its type. As a result,
    // we must find the type to determine where the object is.
    __ mem2reg_opt(Z_ARG4,
                   Address(Z_ARG3, Z_R1_scratch,
                           in_bytes(cp_base_offset + ConstantPoolCacheEntry::flags_offset()) +
                           (BytesPerLong - BytesPerInt)),
                   false);
    __ z_srl(Z_ARG4, ConstantPoolCacheEntry::tos_state_shift);
    // Make sure we don't need to mask Z_ARG4 for tos_state after the above shift.
    ConstantPoolCacheEntry::verify_tos_state_shift();
    __ mem2reg_opt(Z_ARG2, at_tos(1)); // Initially assume a one word jvalue.

    NearLabel load_dtos, cont;

    __ compareU32_and_branch(Z_ARG4, (intptr_t)ltos,
                             Assembler::bcondNotEqual, load_dtos);
    __ mem2reg_opt(Z_ARG2, at_tos(2)); // ltos (two word jvalue)
    __ z_bru(cont);

    __ bind(load_dtos);
    __ compareU32_and_branch(Z_ARG4, (intptr_t)dtos, Assembler::bcondNotEqual, cont);
    __ mem2reg_opt(Z_ARG2, at_tos(2)); // dtos (two word jvalue)

    __ bind(cont);
  }
  // cache entry pointer
  __ add2reg_with_index(Z_ARG3, in_bytes(cp_base_offset), Z_ARG3, Z_R1_scratch);

  // object(tos)
  __ load_address(Z_ARG4, Address(Z_esp, Interpreter::stackElementSize));
  // Z_ARG2: object pointer set up above (NULL if static)
  // Z_ARG3: cache entry pointer
  // Z_ARG4: jvalue object on the stack
  __ call_VM(noreg,
             CAST_FROM_FN_PTR(address, InterpreterRuntime::post_field_modification),
             Z_ARG2, Z_ARG3, Z_ARG4);
  __ get_cache_and_index_at_bcp(cache, index, 1);

  __ bind(L1);
  BLOCK_COMMENT("} jvmti_post_field_mod");
}
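
// For reference: the tos-state logic above, as a C-like sketch (illustrative
// only). Long and double values occupy two expression stack slots, so the
// object reference sits one slot further down:
//
//   obj = (tos_state == ltos || tos_state == dtos) ? esp[2] : esp[1];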


void TemplateTable::putfield_or_static(int byte_no, bool is_static, RewriteControl rc) {
  transition(vtos, vtos);

  const Register cache         = Z_tmp_1;
  const Register index         = Z_ARG5;
  const Register obj           = Z_tmp_1;
  const Register off           = Z_tmp_2;
  const Register flags         = Z_R1_scratch;
  const Register br_tab        = Z_ARG5;
  const Register bc            = Z_tmp_1;
  const Register oopStore_tmp1 = Z_R1_scratch;
  const Register oopStore_tmp2 = Z_ARG5;
  const Register oopStore_tmp3 = Z_R0_scratch;

  resolve_cache_and_index(byte_no, cache, index, sizeof(u2));
  jvmti_post_field_mod(cache, index, is_static);
  load_field_cp_cache_entry(obj, cache, index, off, flags, is_static);
  // begin of life for:
  //   obj, off   long life range
  //   flags      short life range, up to branch into branch table
  // end of life for:
  //   cache, index

  const Address field(obj, off);
  Label is_Byte, is_Bool, is_Int, is_Short, is_Char,
        is_Long, is_Float, is_Object, is_Double;
  Label is_badState8, is_badState9, is_badStateA, is_badStateB,
        is_badStateC, is_badStateD, is_badStateE, is_badStateF,
        is_badState;
  Label branchTable, atosHandler, Done;
  bool  do_rewrite   = !is_static && (rc == may_rewrite);
  bool  dont_rewrite = (is_static || (rc == may_not_rewrite));

  assert(do_rewrite == !dont_rewrite, "Oops, code is not fit for that");

  assert(btos == 0, "change code, btos != 0");

#ifdef ASSERT
  const unsigned int bsize = is_static ? BTB_MINSIZE*1 : BTB_MINSIZE*4;
#else
  const unsigned int bsize = is_static ? BTB_MINSIZE*1 : BTB_MINSIZE*8;
#endif

  // Calculate address of branch table entry and branch there.
  {
    const int bit_shift = exact_log2(bsize); // Size of each branch table entry.
    const int r_bitpos  = 63 - bit_shift;
    const int l_bitpos  = r_bitpos - ConstantPoolCacheEntry::tos_state_bits + 1;
    const int n_rotate  = (bit_shift - ConstantPoolCacheEntry::tos_state_shift);
    __ z_larl(br_tab, branchTable);
    __ rotate_then_insert(flags, flags, l_bitpos, r_bitpos, n_rotate, true);
    __ z_bc(Assembler::bcondAlways, 0, flags, br_tab);
  }
  // end of life for:
  //   flags, br_tab

  __ align_address(bsize);
  BIND(branchTable);

  // btos
  BTB_BEGIN(is_Byte, bsize, "putfield_or_static:is_Byte");
  __ pop(btos);
  if (!is_static) {
    pop_and_check_object(obj);
  }
  __ z_stc(Z_tos, field);
  if (do_rewrite) {
    patch_bytecode(Bytecodes::_fast_bputfield, bc, Z_ARG5, true, byte_no);
  }
  __ z_bru(Done);
  BTB_END( is_Byte, bsize, "putfield_or_static:is_Byte");

  // ztos
  BTB_BEGIN(is_Bool, bsize, "putfield_or_static:is_Bool");
  __ pop(ztos);
  if (!is_static) {
    pop_and_check_object(obj);
  }
  __ z_nilf(Z_tos, 0x1);
  __ z_stc(Z_tos, field);
  if (do_rewrite) {
    patch_bytecode(Bytecodes::_fast_zputfield, bc, Z_ARG5, true, byte_no);
  }
  __ z_bru(Done);
  BTB_END(is_Bool, bsize, "putfield_or_static:is_Bool");

  // ctos
  BTB_BEGIN(is_Char, bsize, "putfield_or_static:is_Char");
  __ pop(ctos);
  if (!is_static) {
    pop_and_check_object(obj);
  }
  __ z_sth(Z_tos, field);
  if (do_rewrite) {
    patch_bytecode(Bytecodes::_fast_cputfield, bc, Z_ARG5, true, byte_no);
  }
  __ z_bru(Done);
  BTB_END( is_Char, bsize, "putfield_or_static:is_Char");

  // stos
  BTB_BEGIN(is_Short, bsize, "putfield_or_static:is_Short");
  __ pop(stos);
  if (!is_static) {
    pop_and_check_object(obj);
  }
  __ z_sth(Z_tos, field);
  if (do_rewrite) {
    patch_bytecode(Bytecodes::_fast_sputfield, bc, Z_ARG5, true, byte_no);
  }
  __ z_bru(Done);
  BTB_END( is_Short, bsize, "putfield_or_static:is_Short");

  // itos
  BTB_BEGIN(is_Int, bsize, "putfield_or_static:is_Int");
  __ pop(itos);
  if (!is_static) {
    pop_and_check_object(obj);
  }
  __ reg2mem_opt(Z_tos, field, false);
  if (do_rewrite) {
    patch_bytecode(Bytecodes::_fast_iputfield, bc, Z_ARG5, true, byte_no);
  }
  __ z_bru(Done);
  BTB_END( is_Int, bsize, "putfield_or_static:is_Int");

  // ltos
  BTB_BEGIN(is_Long, bsize, "putfield_or_static:is_Long");
  __ pop(ltos);
  if (!is_static) {
    pop_and_check_object(obj);
  }
  __ reg2mem_opt(Z_tos, field);
  if (do_rewrite) {
    patch_bytecode(Bytecodes::_fast_lputfield, bc, Z_ARG5, true, byte_no);
  }
  __ z_bru(Done);
  BTB_END( is_Long, bsize, "putfield_or_static:is_Long");

  // ftos
  BTB_BEGIN(is_Float, bsize, "putfield_or_static:is_Float");
  __ pop(ftos);
  if (!is_static) {
    pop_and_check_object(obj);
  }
  __ freg2mem_opt(Z_ftos, field, false);
  if (do_rewrite) {
    patch_bytecode(Bytecodes::_fast_fputfield, bc, Z_ARG5, true, byte_no);
  }
  __ z_bru(Done);
  BTB_END( is_Float, bsize, "putfield_or_static:is_Float");

  // dtos
  BTB_BEGIN(is_Double, bsize, "putfield_or_static:is_Double");
  __ pop(dtos);
  if (!is_static) {
    pop_and_check_object(obj);
  }
  __ freg2mem_opt(Z_ftos, field);
  if (do_rewrite) {
    patch_bytecode(Bytecodes::_fast_dputfield, bc, Z_ARG5, true, byte_no);
  }
  __ z_bru(Done);
  BTB_END( is_Double, bsize, "putfield_or_static:is_Double");
bsize, "putfield_or_static:is_Double"); 2986 2987 // atos 2988 BTB_BEGIN(is_Object, bsize, "putfield_or_static:is_Object"); 2989 __ z_bru(atosHandler); 2990 BTB_END( is_Object, bsize, "putfield_or_static:is_Object"); 2991 2992 // Bad state detection comes at no extra runtime cost. 2993 BTB_BEGIN(is_badState8, bsize, "putfield_or_static:is_badState8"); 2994 __ z_illtrap(); 2995 __ z_bru(is_badState); 2996 BTB_END( is_badState8, bsize, "putfield_or_static:is_badState8"); 2997 BTB_BEGIN(is_badState9, bsize, "putfield_or_static:is_badState9"); 2998 __ z_illtrap(); 2999 __ z_bru(is_badState); 3000 BTB_END( is_badState9, bsize, "putfield_or_static:is_badState9"); 3001 BTB_BEGIN(is_badStateA, bsize, "putfield_or_static:is_badStateA"); 3002 __ z_illtrap(); 3003 __ z_bru(is_badState); 3004 BTB_END( is_badStateA, bsize, "putfield_or_static:is_badStateA"); 3005 BTB_BEGIN(is_badStateB, bsize, "putfield_or_static:is_badStateB"); 3006 __ z_illtrap(); 3007 __ z_bru(is_badState); 3008 BTB_END( is_badStateB, bsize, "putfield_or_static:is_badStateB"); 3009 BTB_BEGIN(is_badStateC, bsize, "putfield_or_static:is_badStateC"); 3010 __ z_illtrap(); 3011 __ z_bru(is_badState); 3012 BTB_END( is_badStateC, bsize, "putfield_or_static:is_badStateC"); 3013 BTB_BEGIN(is_badStateD, bsize, "putfield_or_static:is_badStateD"); 3014 __ z_illtrap(); 3015 __ z_bru(is_badState); 3016 BTB_END( is_badStateD, bsize, "putfield_or_static:is_badStateD"); 3017 BTB_BEGIN(is_badStateE, bsize, "putfield_or_static:is_badStateE"); 3018 __ z_illtrap(); 3019 __ z_bru(is_badState); 3020 BTB_END( is_badStateE, bsize, "putfield_or_static:is_badStateE"); 3021 BTB_BEGIN(is_badStateF, bsize, "putfield_or_static:is_badStateF"); 3022 __ z_illtrap(); 3023 __ z_bru(is_badState); 3024 BTB_END( is_badStateF, bsize, "putfield_or_static:is_badStateF"); 3025 3026 __ align_address(64); 3027 BIND(is_badState); // Do this outside branch table. Needs a lot of space. 3028 { 3029 unsigned int b_off = __ offset(); 3030 if (is_static) __ stop_static("Bad state in putstatic"); 3031 else __ stop_static("Bad state in putfield"); 3032 unsigned int e_off = __ offset(); 3033 } 3034 3035 __ align_address(64); 3036 BIND(atosHandler); // Oops are really complicated to handle. 3037 // There is a lot of code generated. 3038 // Therefore: generate the handler outside of branch table. 3039 // There is no performance penalty. The additional branch 3040 // to here is compensated for by the fallthru to "Done". 3041 { 3042 unsigned int b_off = __ offset(); 3043 __ pop(atos); 3044 if (!is_static) { 3045 pop_and_check_object(obj); 3046 } 3047 // Store into the field 3048 do_oop_store(_masm, obj, off, Z_tos, false, 3049 oopStore_tmp1, oopStore_tmp2, oopStore_tmp3, _bs->kind(), false); 3050 if (do_rewrite) { 3051 patch_bytecode(Bytecodes::_fast_aputfield, bc, Z_ARG5, true, byte_no); 3052 } 3053 // __ z_bru(Done); // fallthru 3054 unsigned int e_off = __ offset(); 3055 } 3056 3057 BIND(Done); 3058 3059 // Check for volatile store. 

  BIND(Done);

  // Check for volatile store.
  Label notVolatile;

  __ testbit(Z_ARG4, ConstantPoolCacheEntry::is_volatile_shift);
  __ z_brz(notVolatile);
  __ z_fence();

  BIND(notVolatile);
}

void TemplateTable::putfield(int byte_no) {
  BLOCK_COMMENT("putfield {");
  putfield_or_static(byte_no, false);
  BLOCK_COMMENT("} putfield");
}

void TemplateTable::nofast_putfield(int byte_no) {
  putfield_or_static(byte_no, false, may_not_rewrite);
}

void TemplateTable::putstatic(int byte_no) {
  BLOCK_COMMENT("putstatic {");
  putfield_or_static(byte_no, true);
  BLOCK_COMMENT("} putstatic");
}

// Push the tos value back to the stack.
// GC will find oops there and update them.
void TemplateTable::jvmti_post_fast_field_mod() {

  if (!JvmtiExport::can_post_field_modification()) {
    return;
  }

  // Check to see if a field modification watch has been set before
  // we take the time to call into the VM.
  Label exit;

  BLOCK_COMMENT("jvmti_post_fast_field_mod {");

  __ load_absolute_address(Z_R1_scratch,
                           (address) JvmtiExport::get_field_modification_count_addr());
  __ load_and_test_int(Z_R0_scratch, Address(Z_R1_scratch));
  __ z_brz(exit);

  Register obj = Z_tmp_1;

  __ pop_ptr(obj);  // Copy the object pointer from tos.
  __ verify_oop(obj);
  __ push_ptr(obj); // Put the object pointer back on tos.

  // Save tos values before call_VM() clobbers them. Since we have
  // to do it for every data type, we use the saved values as the
  // jvalue object.
  switch (bytecode()) { // Load values into the jvalue object.
    case Bytecodes::_fast_aputfield:
      __ push_ptr(Z_tos);
      break;
    case Bytecodes::_fast_bputfield:
    case Bytecodes::_fast_zputfield:
    case Bytecodes::_fast_sputfield:
    case Bytecodes::_fast_cputfield:
    case Bytecodes::_fast_iputfield:
      __ push_i(Z_tos);
      break;
    case Bytecodes::_fast_dputfield:
      __ push_d();
      break;
    case Bytecodes::_fast_fputfield:
      __ push_f();
      break;
    case Bytecodes::_fast_lputfield:
      __ push_l(Z_tos);
      break;

    default:
      ShouldNotReachHere();
  }

  // jvalue on the stack
  __ load_address(Z_ARG4, Address(Z_esp, Interpreter::stackElementSize));
  // Access constant pool cache entry.
  __ get_cache_entry_pointer_at_bcp(Z_ARG3, Z_tos, 1);
  __ verify_oop(obj);

  // obj   : object pointer copied above
  // Z_ARG3: cache entry pointer
  // Z_ARG4: jvalue object on the stack
  __ call_VM(noreg,
             CAST_FROM_FN_PTR(address, InterpreterRuntime::post_field_modification),
             obj, Z_ARG3, Z_ARG4);

  switch (bytecode()) { // Restore tos values.
    case Bytecodes::_fast_aputfield:
      __ pop_ptr(Z_tos);
      break;
    case Bytecodes::_fast_bputfield:
    case Bytecodes::_fast_zputfield:
    case Bytecodes::_fast_sputfield:
    case Bytecodes::_fast_cputfield:
    case Bytecodes::_fast_iputfield:
      __ pop_i(Z_tos);
      break;
    case Bytecodes::_fast_dputfield:
      __ pop_d(Z_ftos);
      break;
    case Bytecodes::_fast_fputfield:
      __ pop_f(Z_ftos);
      break;
    case Bytecodes::_fast_lputfield:
      __ pop_l(Z_tos);
      break;
  }

  __ bind(exit);
  BLOCK_COMMENT("} jvmti_post_fast_field_mod");
}

void TemplateTable::fast_storefield(TosState state) {
  transition(state, vtos);

  ByteSize base = ConstantPoolCache::base_offset();
  jvmti_post_fast_field_mod();

  // Access constant pool cache.
  Register cache = Z_tmp_1;
  Register index = Z_tmp_2;
  Register flags = Z_ARG5;

  // Index comes in bytes, don't shift afterwards!
  __ get_cache_and_index_at_bcp(cache, index, 1);

  // Test for volatile.
  assert(!flags->is_volatile(), "do_oop_store could perform leaf RT call");
  __ z_lg(flags, Address(cache, index, base + ConstantPoolCacheEntry::flags_offset()));

  // Replace index with field offset from cache entry.
  Register field_offset = index;
  __ z_lg(field_offset, Address(cache, index, base + ConstantPoolCacheEntry::f2_offset()));

  // Get object from stack.
  Register obj = cache;

  pop_and_check_object(obj);

  // field address
  const Address field(obj, field_offset);

  // access field
  switch (bytecode()) {
    case Bytecodes::_fast_aputfield:
      do_oop_store(_masm, obj, field_offset, Z_tos, false,
                   Z_ARG2, Z_ARG3, Z_ARG4, _bs->kind(), false);
      break;
    case Bytecodes::_fast_lputfield:
      __ reg2mem_opt(Z_tos, field);
      break;
    case Bytecodes::_fast_iputfield:
      __ reg2mem_opt(Z_tos, field, false);
      break;
    case Bytecodes::_fast_zputfield:
      __ z_nilf(Z_tos, 0x1);
      // fall through to bputfield
    case Bytecodes::_fast_bputfield:
      __ z_stc(Z_tos, field);
      break;
    case Bytecodes::_fast_sputfield:
      // fall through
    case Bytecodes::_fast_cputfield:
      __ z_sth(Z_tos, field);
      break;
    case Bytecodes::_fast_fputfield:
      __ freg2mem_opt(Z_ftos, field, false);
      break;
    case Bytecodes::_fast_dputfield:
      __ freg2mem_opt(Z_ftos, field);
      break;
    default:
      ShouldNotReachHere();
  }

  // Check for volatile store.
  Label notVolatile;

  __ testbit(flags, ConstantPoolCacheEntry::is_volatile_shift);
  __ z_brz(notVolatile);
  __ z_fence();

  __ bind(notVolatile);
}
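
// For reference: the fast field bytecodes rely on the constant pool cache
// entry being resolved already, so no resolution check is emitted (sketch):
//
//   offset = cp_cache_entry->f2;   // field offset, precomputed
//   *(obj + offset) = value;       // plus a fence if the field is volatile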

void TemplateTable::fast_accessfield(TosState state) {
  transition(atos, state);

  Register obj = Z_tos;

  // Do the JVMTI work here to avoid disturbing the register state below.
  if (JvmtiExport::can_post_field_access()) {
    // Check to see if a field access watch has been set before we
    // take the time to call into the VM.
    Label cont;

    __ load_absolute_address(Z_R1_scratch,
                             (address)JvmtiExport::get_field_access_count_addr());
    __ load_and_test_int(Z_R0_scratch, Address(Z_R1_scratch));
    __ z_brz(cont);

    // Access constant pool cache entry.

    __ get_cache_entry_pointer_at_bcp(Z_ARG3, Z_tmp_1, 1);
    __ verify_oop(obj);
    __ push_ptr(obj); // Save object pointer before call_VM() clobbers it.
    __ z_lgr(Z_ARG2, obj);

    // Z_ARG2: object pointer copied above
    // Z_ARG3: cache entry pointer
    __ call_VM(noreg,
               CAST_FROM_FN_PTR(address, InterpreterRuntime::post_field_access),
               Z_ARG2, Z_ARG3);
    __ pop_ptr(obj); // Restore object pointer.

    __ bind(cont);
  }

  // Access constant pool cache.
  Register cache = Z_tmp_1;
  Register index = Z_tmp_2;

  // Index comes in bytes, don't shift afterwards!
  __ get_cache_and_index_at_bcp(cache, index, 1);
  // Replace index with field offset from cache entry.
  __ mem2reg_opt(index,
                 Address(cache, index,
                         ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::f2_offset()));

  __ verify_oop(obj);
  __ null_check(obj);

  Address field(obj, index);

  // access field
  switch (bytecode()) {
    case Bytecodes::_fast_agetfield:
      __ load_heap_oop(Z_tos, field);
      __ verify_oop(Z_tos);
      return;
    case Bytecodes::_fast_lgetfield:
      __ mem2reg_opt(Z_tos, field);
      return;
    case Bytecodes::_fast_igetfield:
      __ mem2reg_opt(Z_tos, field, false);
      return;
    case Bytecodes::_fast_bgetfield:
      __ z_lb(Z_tos, field);
      return;
    case Bytecodes::_fast_sgetfield:
      __ z_lh(Z_tos, field);
      return;
    case Bytecodes::_fast_cgetfield:
      __ z_llgh(Z_tos, field); // Load into 64 bits, works on all CPUs.
      return;
    case Bytecodes::_fast_fgetfield:
      __ mem2freg_opt(Z_ftos, field, false);
      return;
    case Bytecodes::_fast_dgetfield:
      __ mem2freg_opt(Z_ftos, field);
      return;
    default:
      ShouldNotReachHere();
  }
}

void TemplateTable::fast_xaccess(TosState state) {
  transition(vtos, state);

  Register receiver = Z_tos;
  // Get receiver.
  __ mem2reg_opt(Z_tos, aaddress(0));

  // Access constant pool cache.
  Register cache = Z_tmp_1;
  Register index = Z_tmp_2;

  // Index comes in bytes, don't shift afterwards!
  __ get_cache_and_index_at_bcp(cache, index, 2);
  // Replace index with field offset from cache entry.
  __ mem2reg_opt(index,
                 Address(cache, index,
                         ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::f2_offset()));

  // Make sure exception is reported in correct bcp range (getfield is
  // next instruction).
  __ add2reg(Z_bcp, 1);
  __ null_check(receiver);
  switch (state) {
    case itos:
      __ mem2reg_opt(Z_tos, Address(receiver, index), false);
      break;
    case atos:
      __ load_heap_oop(Z_tos, Address(receiver, index));
      __ verify_oop(Z_tos);
      break;
    case ftos:
      __ mem2freg_opt(Z_ftos, Address(receiver, index));
      break;
    default:
      ShouldNotReachHere();
  }

  // Reset bcp to original position.
  __ add2reg(Z_bcp, -1);
}
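
// For reference: fast_xaccess implements the fused aload_0 + fast getfield
// templates (_fast_iaccess_0 and friends). Z_bcp is bumped by one so that an
// implicit null check is reported against the getfield part of the pair,
// then restored afterwards.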

//-----------------------------------------------------------------------------
// Calls

void TemplateTable::prepare_invoke(int byte_no,
                                   Register method,  // linked method (or i-klass)
                                   Register index,   // itable index, MethodType, etc.
                                   Register recv,    // If caller wants to see it.
                                   Register flags) { // If caller wants to test it.
  // Determine flags.
  const Bytecodes::Code code = bytecode();
  const bool is_invokeinterface = code == Bytecodes::_invokeinterface;
  const bool is_invokedynamic   = code == Bytecodes::_invokedynamic;
  const bool is_invokehandle    = code == Bytecodes::_invokehandle;
  const bool is_invokevirtual   = code == Bytecodes::_invokevirtual;
  const bool is_invokespecial   = code == Bytecodes::_invokespecial;
  const bool load_receiver      = (recv != noreg);
  assert(load_receiver == (code != Bytecodes::_invokestatic && code != Bytecodes::_invokedynamic), "");

  // Setup registers & access constant pool cache.
  if (recv  == noreg) { recv  = Z_ARG1; }
  if (flags == noreg) { flags = Z_ARG2; }
  assert_different_registers(method, Z_R14, index, recv, flags);

  BLOCK_COMMENT("prepare_invoke {");

  load_invoke_cp_cache_entry(byte_no, method, index, flags, is_invokevirtual, false, is_invokedynamic);

  // Maybe push appendix to arguments.
  if (is_invokedynamic || is_invokehandle) {
    Label L_no_push;
    Register resolved_reference = Z_R1_scratch;
    __ testbit(flags, ConstantPoolCacheEntry::has_appendix_shift);
    __ z_bfalse(L_no_push);
    // Push the appendix as a trailing parameter.
    // This must be done before we get the receiver,
    // since the parameter_size includes it.
    __ load_resolved_reference_at_index(resolved_reference, index);
    __ verify_oop(resolved_reference);
    __ push_ptr(resolved_reference); // Push appendix (MethodType, CallSite, etc.).
    __ bind(L_no_push);
  }

  // Load receiver if needed (after appendix is pushed so parameter size is correct).
  if (load_receiver) {
    assert(!is_invokedynamic, "");
    // recv := int2long(flags & ConstantPoolCacheEntry::parameter_size_mask) << 3
    // Flags is zero-extended int2long when loaded during load_invoke_cp_cache_entry().
    // Only the least significant byte (psize) of flags is used.
    {
      const unsigned int logSES = Interpreter::logStackElementSize;
      const int bit_shift = logSES;
      const int r_bitpos  = 63 - bit_shift;
      const int l_bitpos  = r_bitpos - ConstantPoolCacheEntry::parameter_size_bits + 1;
      const int n_rotate  = bit_shift;
      assert(ConstantPoolCacheEntry::parameter_size_mask == 255, "adapt bitpositions");
      __ rotate_then_insert(recv, flags, l_bitpos, r_bitpos, n_rotate, true);
    }
    // Recv now contains #arguments * StackElementSize.

    Address recv_addr(Z_esp, recv);
    __ z_lg(recv, recv_addr);
    __ verify_oop(recv);
  }

  // Compute return type.
  // ret_type is used by callers (invokespecial, invokestatic) at least.
  Register ret_type = Z_R1_scratch;
  assert_different_registers(ret_type, method);

  const address table_addr = (address)Interpreter::invoke_return_entry_table_for(code);
  __ load_absolute_address(Z_R14, table_addr);

  {
    const int bit_shift = LogBytesPerWord; // Size of each table entry.
    const int r_bitpos  = 63 - bit_shift;
    const int l_bitpos  = r_bitpos - ConstantPoolCacheEntry::tos_state_bits + 1;
    const int n_rotate  = bit_shift - ConstantPoolCacheEntry::tos_state_shift;
    __ rotate_then_insert(ret_type, flags, l_bitpos, r_bitpos, n_rotate, true);
    // Make sure we don't need to mask flags for tos_state after the above shift.
    ConstantPoolCacheEntry::verify_tos_state_shift();
  }

  __ z_lg(Z_R14, Address(Z_R14, ret_type)); // Load return address.
  BLOCK_COMMENT("} prepare_invoke");
}
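
// For reference: the return-entry selection at the end of prepare_invoke,
// as a sketch (illustrative only):
//
//   Z_R14 = Interpreter::invoke_return_entry_table_for(code)[tos_state(flags)];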


void TemplateTable::invokevirtual_helper(Register index,
                                         Register recv,
                                         Register flags) {
  // Uses temporary registers Z_tmp_2, Z_ARG4.
  assert_different_registers(index, recv, Z_tmp_2, Z_ARG4);

  // Test for an invoke of a final method.
  Label notFinal;

  BLOCK_COMMENT("invokevirtual_helper {");

  __ testbit(flags, ConstantPoolCacheEntry::is_vfinal_shift);
  __ z_brz(notFinal);

  const Register method = index; // Method must be Z_ARG3.
  assert(method == Z_ARG3, "method must be second argument for interpreter calling convention");

  // Do the call - the index is actually the method to call.
  // That is, f2 is a vtable index if !is_vfinal, else f2 is a method.

  // It's final, need a null check here!
  __ null_check(recv);

  // Profile this call.
  __ profile_final_call(Z_tmp_2);
  __ profile_arguments_type(Z_tmp_2, method, Z_ARG5, true); // Argument type profiling.
  __ jump_from_interpreted(method, Z_tmp_2);

  __ bind(notFinal);

  // Get receiver klass.
  __ null_check(recv, Z_R0_scratch, oopDesc::klass_offset_in_bytes());
  __ load_klass(Z_tmp_2, recv);

  // Profile this call.
  __ profile_virtual_call(Z_tmp_2, Z_ARG4, Z_ARG5);

  // Get target method & entry point.
  __ z_sllg(index, index, exact_log2(vtableEntry::size_in_bytes()));
  __ mem2reg_opt(method,
                 Address(Z_tmp_2, index,
                         Klass::vtable_start_offset() + in_ByteSize(vtableEntry::method_offset_in_bytes())));
  __ profile_arguments_type(Z_ARG4, method, Z_ARG5, true);
  __ jump_from_interpreted(method, Z_ARG4);
  BLOCK_COMMENT("} invokevirtual_helper");
}

void TemplateTable::invokevirtual(int byte_no) {
  transition(vtos, vtos);

  assert(byte_no == f2_byte, "use this argument");
  prepare_invoke(byte_no,
                 Z_ARG3,  // method or vtable index
                 noreg,   // unused itable index
                 Z_ARG1,  // recv
                 Z_ARG2); // flags

  // Z_ARG3 : index
  // Z_ARG1 : receiver
  // Z_ARG2 : flags
  invokevirtual_helper(Z_ARG3, Z_ARG1, Z_ARG2);
}

void TemplateTable::invokespecial(int byte_no) {
  transition(vtos, vtos);

  assert(byte_no == f1_byte, "use this argument");
  Register Rmethod = Z_tmp_2;
  prepare_invoke(byte_no, Rmethod, noreg, // Get f1 method.
                 Z_ARG3);                 // Get receiver also for null check.
  __ verify_oop(Z_ARG3);
  __ null_check(Z_ARG3);
  // Do the call.
  __ profile_call(Z_ARG2);
  __ profile_arguments_type(Z_ARG2, Rmethod, Z_ARG5, false);
  __ jump_from_interpreted(Rmethod, Z_R1_scratch);
}

void TemplateTable::invokestatic(int byte_no) {
  transition(vtos, vtos);

  assert(byte_no == f1_byte, "use this argument");
  Register Rmethod = Z_tmp_2;
  prepare_invoke(byte_no, Rmethod); // Get f1 method.
  // Do the call.
  __ profile_call(Z_ARG2);
  __ profile_arguments_type(Z_ARG2, Rmethod, Z_ARG5, false);
  __ jump_from_interpreted(Rmethod, Z_R1_scratch);
}
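
// For reference: the dispatch decision implemented by invokevirtual_helper
// above (sketch only):
//
//   if (is_vfinal) method = f2;                             // f2 is a Method*
//   else           method = recv->klass()->vtable()[f2];    // f2 is a vtable index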
// Outdated feature, and we don't support it.
void TemplateTable::fast_invokevfinal(int byte_no) {
  transition(vtos, vtos);
  assert(byte_no == f2_byte, "use this argument");
  __ stop("fast_invokevfinal not used on linuxs390x");
}

void TemplateTable::invokeinterface(int byte_no) {
  transition(vtos, vtos);

  assert(byte_no == f1_byte, "use this argument");
  Register interface = Z_tos;
  Register index     = Z_ARG3;
  Register receiver  = Z_tmp_1;
  Register flags     = Z_ARG5;

  BLOCK_COMMENT("invokeinterface {");

  // Destroys Z_ARG1 and Z_ARG2, thus use Z_ARG4 and copy afterwards.
  prepare_invoke(byte_no, Z_ARG4, index,  // Get f1 klassOop, f2 itable index.
                 receiver, flags);

  // Z_R14 (== Z_bytecode) : return entry

  __ z_lgr(interface, Z_ARG4);

  // Special case of invokeinterface called for a virtual method of
  // java.lang.Object. See cpCacheOop.cpp for details.
  // This code isn't produced by javac, but could be produced by
  // another compliant Java compiler.
  Label notMethod;
  __ testbit(flags, ConstantPoolCacheEntry::is_forced_virtual_shift);
  __ z_brz(notMethod);
  invokevirtual_helper(index, receiver, flags);
  __ bind(notMethod);

  // Get the receiver klass into klass - also a null check.
  Register klass = flags;

  __ restore_locals();
  __ load_klass(klass, receiver);

  // Profile this call.
  __ profile_virtual_call(klass, Z_ARG2/*mdp*/, Z_ARG4/*scratch*/);

  NearLabel no_such_interface, no_such_method;
  Register  method = Z_tmp_2;

  // TK 2010-08-24: Save the index to Z_ARG4. Needed in case of an error
  // in throw_AbstractMethodErrorByTemplateTable.
  __ z_lgr(Z_ARG4, index);
  // TK 2011-03-24: Also copy klass, because it could be changed in
  // lookup_interface_method.
  __ z_lgr(Z_ARG2, klass);
  __ lookup_interface_method(// inputs: rec. class, interface, itable index
                             klass, interface, index,
                             // outputs: method, scan temp. reg
                             method, Z_tmp_2, Z_R1_scratch,
                             no_such_interface);

  // Check for abstract method error.
  // Note: This should be done more efficiently via a
  // throw_abstract_method_error interpreter entry point and a
  // conditional jump to it in case of a null method.
  __ compareU64_and_branch(method, (intptr_t) 0,
                           Assembler::bcondZero, no_such_method);

  __ profile_arguments_type(Z_ARG3, method, Z_ARG5, true);

  // Do the call.
  __ jump_from_interpreted(method, Z_ARG5);
  __ should_not_reach_here();

  // Exception handling code follows...
  // Note: Must restore the interpreter registers to canonical
  // state for exception handling to work correctly!

  __ bind(no_such_method);

  // Throw exception.
  __ restore_bcp();     // Bcp must be correct for the exception handler (was destroyed).
  __ restore_locals();  // Make sure the locals pointer is correct as well (was destroyed).
  // TK 2010-08-24: Call throw_AbstractMethodErrorByTemplateTable now with the
  // relevant information for generating a better error message.
  __ call_VM(noreg,
             CAST_FROM_FN_PTR(address,
                              InterpreterRuntime::throw_AbstractMethodError),
             Z_ARG2, interface, Z_ARG4);
  // The call_VM checks for exceptions, so we should never return here.
  __ should_not_reach_here();

  __ bind(no_such_interface);

  // Throw exception.
  __ restore_bcp();     // Bcp must be correct for the exception handler (was destroyed).
  __ restore_locals();  // Make sure the locals pointer is correct as well (was destroyed).
  // TK 2010-08-24: Call throw_IncompatibleClassChangeErrorByTemplateTable now with the
  // relevant information for generating a better error message.
  __ call_VM(noreg,
             CAST_FROM_FN_PTR(address,
                              InterpreterRuntime::throw_IncompatibleClassChangeError),
             Z_ARG2, interface);
  // The call_VM checks for exceptions, so we should never return here.
  __ should_not_reach_here();

  BLOCK_COMMENT("} invokeinterface");
  return;
}

void TemplateTable::invokehandle(int byte_no) {
  transition(vtos, vtos);

  const Register method = Z_tmp_2;
  const Register recv   = Z_ARG5;
  const Register mtype  = Z_tmp_1;
  prepare_invoke(byte_no,
                 method, mtype,  // Get f2 method, f1 MethodType.
                 recv);
  __ verify_method_ptr(method);
  __ verify_oop(recv);
  __ null_check(recv);

  // Note: mtype is already pushed (if necessary) by prepare_invoke.

  // FIXME: profile the LambdaForm also.
  __ profile_final_call(Z_ARG2);
  __ profile_arguments_type(Z_ARG3, method, Z_ARG5, true);

  __ jump_from_interpreted(method, Z_ARG3);
}

void TemplateTable::invokedynamic(int byte_no) {
  transition(vtos, vtos);

  const Register Rmethod   = Z_tmp_2;
  const Register Rcallsite = Z_tmp_1;

  prepare_invoke(byte_no, Rmethod, Rcallsite);

  // Rmethod:   MH.linkToCallSite method (from f2)
  // Rcallsite: CallSite object (from f1)

  // Note: the CallSite object is already pushed by prepare_invoke.

  // TODO: should make a type profile for any invokedynamic that takes a ref argument.
  // Profile this call.
  __ profile_call(Z_ARG2);
  __ profile_arguments_type(Z_ARG2, Rmethod, Z_ARG5, false);
  __ jump_from_interpreted(Rmethod, Z_ARG2);
}

//-----------------------------------------------------------------------------
// Allocation

// Original comment on "allow_shared_alloc":
// Always go the slow path.
// + Eliminated optimization within the template-based interpreter:
//   If an allocation is done within the interpreter without using
//   tlabs, the interpreter tries to do the allocation directly
//   on the heap.
// + That means the profiling hooks are not considered and allocations
//   get lost for the profiling framework.
// + However, we do not think that this optimization is really needed,
//   so we now always go the slow path through the VM in this case --
//   SPECjbb2005 shows no measurable performance degradation.
void TemplateTable::_new() {
  transition(vtos, atos);
  Register tags  = Z_tmp_1;
  Register RallocatedObject = Z_tos;
  Register cpool = Z_ARG2;
  Register tmp = Z_ARG3;  // RobjectFields==tmp and Rsize==offset must be a register pair.
  Register offset = Z_ARG4;
  Label slow_case;
  Label done;
  Label initialize_header;

  BLOCK_COMMENT("TemplateTable::_new {");
  __ get_2_byte_integer_at_bcp(offset/*dest*/, 1, InterpreterMacroAssembler::Unsigned);
  __ get_cpool_and_tags(cpool, tags);
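
  // Fast-path summary (pseudocode, for orientation only; the real checks follow):
  //   if (cp_tag(index) != JVM_CONSTANT_Class)           goto slow_case;  // unresolved
  //   if (!iklass->is_fully_initialized())               goto slow_case;
  //   if (iklass->layout_helper() has slow-path bit set) goto slow_case;  // e.g. finalizer
  //   obj = allocate_from_tlab(instance_size);           // else slow_case
  //   clear fields; install mark word; store klass last;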
  // Make sure the class we're about to instantiate has been resolved.
  // This is done before loading the InstanceKlass to be consistent with the
  // order in which the constant pool is updated (see ConstantPool::klass_at_put).
  const int tags_offset = Array<u1>::base_offset_in_bytes();
  __ load_address(tmp, Address(tags, offset, tags_offset));
  __ z_cli(0, tmp, JVM_CONSTANT_Class);
  __ z_brne(slow_case);

  __ z_sllg(offset, offset, LogBytesPerWord);  // Convert index to offset.
  // Get the InstanceKlass.
  Register iklass = cpool;
  __ load_resolved_klass_at_offset(cpool, offset, iklass);

  // Make sure klass is initialized and doesn't have a finalizer.
  // First check that klass is fully initialized.
  const int state_offset = in_bytes(InstanceKlass::init_state_offset());
  if (Immediate::is_uimm12(state_offset)) {
    __ z_cli(state_offset, iklass, InstanceKlass::fully_initialized);
  } else {
    __ z_cliy(state_offset, iklass, InstanceKlass::fully_initialized);
  }
  __ z_brne(slow_case);

  // Get instance_size from InstanceKlass (scaled to a count of bytes).
  Register Rsize = offset;
  const int mask = 1 << Klass::_lh_instance_slow_path_bit;
  __ z_llgf(Rsize, Address(iklass, Klass::layout_helper_offset()));
  __ z_tmll(Rsize, mask);
  __ z_btrue(slow_case);

  // Allocate the instance:
  //  1) Try to allocate in the TLAB.
  //  2) If the above fails (or is not applicable), go to a slow case
  //     (creates a new TLAB, etc.).
  // Note: compared to other architectures, s390's implementation always goes
  // to the slow path if TLAB is used and fails.
  if (UseTLAB) {
    Register RoldTopValue = RallocatedObject;
    Register RnewTopValue = tmp;
    __ z_lg(RoldTopValue, Address(Z_thread, JavaThread::tlab_top_offset()));
    __ load_address(RnewTopValue, Address(RoldTopValue, Rsize));
    __ z_cg(RnewTopValue, Address(Z_thread, JavaThread::tlab_current_end_offset()));
    __ z_brh(slow_case);
    __ z_stg(RnewTopValue, Address(Z_thread, JavaThread::tlab_top_offset()));

    Register RobjectFields = tmp;
    Register Rzero = Z_R1_scratch;
    __ clear_reg(Rzero, true /*whole reg*/, false);  // Load 0L into Rzero. Don't set CC.

    if (!ZeroTLAB) {
      // The object is initialized before the header. If the object size is
      // zero, go directly to the header initialization.
      __ z_aghi(Rsize, (int)-sizeof(oopDesc));  // Subtract header size, set CC.
      __ z_bre(initialize_header);              // Jump if size of fields is zero.

      // Initialize object fields.
      // See the documentation for the MVCLE instruction!!!
      assert(RobjectFields->encoding() % 2 == 0, "RobjectFields must be an even register");
      assert(Rsize->encoding() == (RobjectFields->encoding() + 1),
             "RobjectFields and Rsize must be a register pair");
      assert(Rzero->encoding() % 2 == 1, "Rzero must be an odd register");

      // Rzero (== 0) serves as the source length; MVCLE then copies nothing
      // and fills the object with the padding value 0.
      __ add2reg(RobjectFields, sizeof(oopDesc), RallocatedObject);
      __ move_long_ext(RobjectFields, as_Register(Rzero->encoding() - 1), 0);
    }

    // Initialize the object header only.
    __ bind(initialize_header);
    if (UseBiasedLocking) {
      Register prototype = RobjectFields;
      __ z_lg(prototype, Address(iklass, Klass::prototype_header_offset()));
      __ z_stg(prototype, Address(RallocatedObject, oopDesc::mark_offset_in_bytes()));
    } else {
      __ store_const(Address(RallocatedObject, oopDesc::mark_offset_in_bytes()),
                     (long)markOopDesc::prototype());
    }

    __ store_klass_gap(Rzero, RallocatedObject);  // Zero klass gap for compressed oops.
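
    // Why the klass is stored last (see the store below): a concurrent observer,
    // e.g. the GC, may treat the object as parseable as soon as its klass becomes
    // visible, so the mark word, klass gap, and fields must be initialized first.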
    __ store_klass(iklass, RallocatedObject);  // Store klass last.

    {
      SkipIfEqual skip(_masm, &DTraceAllocProbes, false, Z_ARG5 /*scratch*/);
      // Trigger dtrace event for fastpath.
      __ push(atos);  // Save the return value.
      __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_object_alloc), RallocatedObject);
      __ pop(atos);   // Restore the return value.
    }
    __ z_bru(done);
  }

  // Slow case.
  __ bind(slow_case);
  __ get_constant_pool(Z_ARG2);
  __ get_2_byte_integer_at_bcp(Z_ARG3/*dest*/, 1, InterpreterMacroAssembler::Unsigned);
  call_VM(Z_tos, CAST_FROM_FN_PTR(address, InterpreterRuntime::_new), Z_ARG2, Z_ARG3);
  __ verify_oop(Z_tos);

  // Continue.
  __ bind(done);

  BLOCK_COMMENT("} TemplateTable::_new");
}

void TemplateTable::newarray() {
  transition(itos, atos);

  // Call runtime.
  __ z_llgc(Z_ARG2, at_bcp(1));  // type
  __ z_lgfr(Z_ARG3, Z_tos);      // size
  call_VM(Z_RET,
          CAST_FROM_FN_PTR(address, InterpreterRuntime::newarray),
          Z_ARG2, Z_ARG3);
}

void TemplateTable::anewarray() {
  transition(itos, atos);
  __ get_2_byte_integer_at_bcp(Z_ARG3, 1, InterpreterMacroAssembler::Unsigned);
  __ get_constant_pool(Z_ARG2);
  __ z_lgfr(Z_ARG4, Z_tos);
  call_VM(Z_tos, CAST_FROM_FN_PTR(address, InterpreterRuntime::anewarray),
          Z_ARG2, Z_ARG3, Z_ARG4);
}

void TemplateTable::arraylength() {
  transition(atos, itos);

  int offset = arrayOopDesc::length_offset_in_bytes();

  __ null_check(Z_tos, Z_R0_scratch, offset);
  __ mem2reg_opt(Z_tos, Address(Z_tos, offset), false);
}

void TemplateTable::checkcast() {
  transition(atos, atos);

  NearLabel done, is_null, ok_is_subtype, quicked, resolved;

  BLOCK_COMMENT("checkcast {");
  // If the object is NULL, we are almost done.
  __ compareU64_and_branch(Z_tos, (intptr_t) 0, Assembler::bcondZero, is_null);

  // Get cpool & tags index.
  Register cpool = Z_tmp_1;
  Register tags  = Z_tmp_2;
  Register index = Z_ARG5;

  __ get_cpool_and_tags(cpool, tags);
  __ get_2_byte_integer_at_bcp(index, 1, InterpreterMacroAssembler::Unsigned);
  // See if the bytecode has already been quicked.
  // Note: For CLI, we would have to add the index to the tags pointer first,
  // thus load and compare in a "classic" manner.
  __ z_llgc(Z_R0_scratch,
            Address(tags, index, Array<u1>::base_offset_in_bytes()));
  __ compareU64_and_branch(Z_R0_scratch, JVM_CONSTANT_Class,
                           Assembler::bcondEqual, quicked);

  __ push(atos);  // Save receiver for result, and for GC.
  call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::quicken_io_cc));
  __ get_vm_result_2(Z_tos);

  Register receiver = Z_ARG4;
  Register klass    = Z_tos;
  Register subklass = Z_ARG5;

  __ pop_ptr(receiver);  // Restore receiver.
  __ z_bru(resolved);

  // Get superklass in klass and subklass in subklass.
  __ bind(quicked);

  __ z_lgr(Z_ARG4, Z_tos);                   // Save receiver.
  __ z_sllg(index, index, LogBytesPerWord);  // index2bytes for addressing
  __ load_resolved_klass_at_offset(cpool, index, klass);

  __ bind(resolved);

  __ load_klass(subklass, receiver);
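
  // Java-level illustration (for orientation only): for "(String) obj" the
  // superklass is the resolved String klass and the subklass is obj's dynamic
  // klass; the cast succeeds iff subklass <: superklass (the obj == NULL case
  // was already branched off above).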
  // Generate the subtype check. Object in receiver.
  // Superklass in klass. Subklass in subklass.
  __ gen_subtype_check(subklass, klass, Z_ARG3, Z_tmp_1, ok_is_subtype);

  // Come here on failure.
  __ push_ptr(receiver);
  // Object is at TOS (pushed above), as expected by the exception handler entry.
  __ z_brul((address) Interpreter::_throw_ClassCastException_entry);

  // Come here on success.
  __ bind(ok_is_subtype);

  __ z_lgr(Z_tos, receiver);  // Restore object.

  // Collect counts on whether this test sees NULLs a lot or not.
  if (ProfileInterpreter) {
    __ z_bru(done);
    __ bind(is_null);
    __ profile_null_seen(Z_tmp_1);
  } else {
    __ bind(is_null);  // Same as 'done'.
  }

  __ bind(done);
  BLOCK_COMMENT("} checkcast");
}

void TemplateTable::instanceof() {
  transition(atos, itos);

  NearLabel done, is_null, ok_is_subtype, quicked, resolved;

  BLOCK_COMMENT("instanceof {");
  // If the object is NULL, we are almost done.
  __ compareU64_and_branch(Z_tos, (intptr_t) 0, Assembler::bcondZero, is_null);

  // Get cpool & tags index.
  Register cpool = Z_tmp_1;
  Register tags  = Z_tmp_2;
  Register index = Z_ARG5;

  __ get_cpool_and_tags(cpool, tags);
  __ get_2_byte_integer_at_bcp(index, 1, InterpreterMacroAssembler::Unsigned);
  // See if the bytecode has already been quicked.
  // Note: For CLI, we would have to add the index to the tags pointer first,
  // thus load and compare in a "classic" manner.
  __ z_llgc(Z_R0_scratch,
            Address(tags, index, Array<u1>::base_offset_in_bytes()));
  __ compareU64_and_branch(Z_R0_scratch, JVM_CONSTANT_Class, Assembler::bcondEqual, quicked);

  __ push(atos);  // Save receiver for result, and for GC.
  call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::quicken_io_cc));
  __ get_vm_result_2(Z_tos);

  Register receiver = Z_tmp_2;
  Register klass    = Z_tos;
  Register subklass = Z_tmp_2;

  __ pop_ptr(receiver);  // Restore receiver.
  __ verify_oop(receiver);
  __ load_klass(subklass, subklass);
  __ z_bru(resolved);

  // Get superklass in klass and subklass in subklass.
  __ bind(quicked);

  __ load_klass(subklass, Z_tos);
  __ z_sllg(index, index, LogBytesPerWord);  // index2bytes for addressing
  __ load_resolved_klass_at_offset(cpool, index, klass);

  __ bind(resolved);

  // Generate the subtype check.
  // Superklass in klass. Subklass in subklass.
  __ gen_subtype_check(subklass, klass, Z_ARG4, Z_ARG5, ok_is_subtype);

  // Come here on failure.
  __ clear_reg(Z_tos, true, false);
  __ z_bru(done);

  // Come here on success.
  __ bind(ok_is_subtype);
  __ load_const_optimized(Z_tos, 1);

  // Collect counts on whether this test sees NULLs a lot or not.
  if (ProfileInterpreter) {
    __ z_bru(done);
    __ bind(is_null);
    __ profile_null_seen(Z_tmp_1);
  } else {
    __ bind(is_null);  // Same as 'done'.
  }

  __ bind(done);
  // tos = 0: obj == NULL or obj is not an instance of the specified klass.
  // tos = 1: obj != NULL and obj is an instance of the specified klass.
  BLOCK_COMMENT("} instanceof");
}

//-----------------------------------------------------------------------------
// Breakpoints
void TemplateTable::_breakpoint() {

  // Note: We get here even if we are single stepping.
  // Jbug insists on setting breakpoints at every bytecode
  // even if we are in single step mode.
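  //
  // Protocol (restating the body below, for orientation): fetch the original
  // (unpatched) bytecode, post the JVMTI breakpoint event, then complete
  // execution by re-dispatching the original bytecode.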

  transition(vtos, vtos);

  // Get the unpatched byte code.
  __ get_method(Z_ARG2);
  __ call_VM(noreg,
             CAST_FROM_FN_PTR(address, InterpreterRuntime::get_original_bytecode_at),
             Z_ARG2, Z_bcp);
  // Save the result to a register that is preserved over C-function calls.
  __ z_lgr(Z_tmp_1, Z_RET);

  // Post the breakpoint event.
  __ get_method(Z_ARG2);
  __ call_VM(noreg,
             CAST_FROM_FN_PTR(address, InterpreterRuntime::_breakpoint),
             Z_ARG2, Z_bcp);

  // Must restore the bytecode, because call_VM destroys Z_bytecode.
  __ z_lgr(Z_bytecode, Z_tmp_1);

  // Complete the execution of the original bytecode.
  __ dispatch_only_normal(vtos);
}


// Exceptions

void TemplateTable::athrow() {
  transition(atos, vtos);
  __ null_check(Z_tos);
  __ load_absolute_address(Z_ARG2, Interpreter::throw_exception_entry());
  __ z_br(Z_ARG2);
}

// Synchronization
//
// Note: monitorenter & exit are symmetric routines, which is reflected
// in the structure of the assembly code as well.
//
// Stack layout:
//
//               callers_sp        <- Z_SP (callers_sp == Z_fp (own fp))
//               return_pc
//               [rest of ABI_160]
//              /slot o:   free
//             / ...       free
//  oper.     | slot n+1: free     <- Z_esp points to first free slot
//  stack     | slot n:   val      caches IJAVA_STATE.esp
//            | ...
//             \slot 0:   val
//             /slot m             <- IJAVA_STATE.monitors = monitor block top
//            | ...
//  monitors  | slot 2
//            | slot 1
//             \slot 0
//             /slot l             <- monitor block bot
// ijava_state| ...
//            | slot 2
//             \slot 0
//                                 <- Z_fp
void TemplateTable::monitorenter() {
  transition(atos, vtos);

  BLOCK_COMMENT("monitorenter {");

  // Check for a NULL object.
  __ null_check(Z_tos);
  const int entry_size = frame::interpreter_frame_monitor_size() * wordSize;
  NearLabel allocated;
  // Initialize the entry pointer.
  const Register Rfree_slot = Z_tmp_1;
  __ clear_reg(Rfree_slot, true, false);  // Points to free slot or NULL. Don't set CC.

  // Find a free slot in the monitor block from top to bot (result in Rfree_slot).
  {
    const Register Rcurr_monitor = Z_ARG2;
    const Register Rbot = Z_ARG3;  // Points to word under bottom of monitor block.
    const Register Rlocked_obj = Z_ARG4;
    NearLabel loop, exit, not_free;
    // Start with the top-most entry.
    __ get_monitors(Rcurr_monitor);  // Rcurr_monitor = IJAVA_STATE.monitors
    __ add2reg(Rbot, -frame::z_ijava_state_size, Z_fp);

#ifdef ASSERT
    address reentry = NULL;
    { NearLabel ok;
      __ compareU64_and_branch(Rcurr_monitor, Rbot, Assembler::bcondNotHigh, ok);
      reentry = __ stop_chain_static(reentry, "IJAVA_STATE.monitors points below monitor block bottom");
      __ bind(ok);
    }
    { NearLabel ok;
      __ compareU64_and_branch(Rcurr_monitor, Z_esp, Assembler::bcondHigh, ok);
      reentry = __ stop_chain_static(reentry, "IJAVA_STATE.monitors above Z_esp");
      __ bind(ok);
    }
#endif

    // Check if the bottom has been reached, i.e. if there is at least one monitor.
    __ compareU64_and_branch(Rcurr_monitor, Rbot, Assembler::bcondEqual, exit);

    __ bind(loop);
    // Check if the current entry is used.
    __ load_and_test_long(Rlocked_obj, Address(Rcurr_monitor, BasicObjectLock::obj_offset_in_bytes()));
    __ z_brne(not_free);
    // If not used, then remember the entry in Rfree_slot.
    __ z_lgr(Rfree_slot, Rcurr_monitor);
    __ bind(not_free);
    // Exit if the current entry is for the same object; this guarantees that the
    // new monitor used for a recursive lock is above the older one.
    __ compareU64_and_branch(Rlocked_obj, Z_tos, Assembler::bcondEqual, exit);
    // Otherwise advance to the next entry.
    __ add2reg(Rcurr_monitor, entry_size);
    // Check if the bottom has been reached; if not, then check this entry.
    __ compareU64_and_branch(Rcurr_monitor, Rbot, Assembler::bcondNotEqual, loop);
    __ bind(exit);
  }

  // Rfree_slot != NULL -> found one.
  __ compareU64_and_branch(Rfree_slot, (intptr_t)0L, Assembler::bcondNotEqual, allocated);

  // Allocate one if there's no free slot.
  __ add_monitor_to_stack(false, Z_ARG3, Z_ARG4, Z_ARG5);
  __ get_monitors(Rfree_slot);

  // Rfree_slot: points to monitor entry.
  __ bind(allocated);

  // Increment bcp to point to the next bytecode, so exception
  // handling for async. exceptions works correctly.
  // The object has already been popped from the stack, so the
  // expression stack looks correct.
  __ add2reg(Z_bcp, 1, Z_bcp);

  // Store the object.
  __ z_stg(Z_tos, BasicObjectLock::obj_offset_in_bytes(), Rfree_slot);
  __ lock_object(Rfree_slot, Z_tos);

  // Check to make sure this monitor doesn't cause a stack overflow after locking.
  __ save_bcp();  // In case of exception.
  __ generate_stack_overflow_check(0);

  // The bcp has already been incremented. Just need to dispatch to
  // the next instruction.
  __ dispatch_next(vtos);

  BLOCK_COMMENT("} monitorenter");
}


void TemplateTable::monitorexit() {
  transition(atos, vtos);

  BLOCK_COMMENT("monitorexit {");

  // Check for a NULL object.
  __ null_check(Z_tos);

  NearLabel found, not_found;
  const Register Rcurr_monitor = Z_ARG2;

  // Find the matching slot.
  {
    const int entry_size = frame::interpreter_frame_monitor_size() * wordSize;
    NearLabel loop;

    const Register Rbot = Z_ARG3;  // Points to word under bottom of monitor block.
    const Register Rlocked_obj = Z_ARG4;
    // Start with the top-most entry.
    __ get_monitors(Rcurr_monitor);  // Rcurr_monitor = IJAVA_STATE.monitors
    __ add2reg(Rbot, -frame::z_ijava_state_size, Z_fp);

#ifdef ASSERT
    address reentry = NULL;
    { NearLabel ok;
      __ compareU64_and_branch(Rcurr_monitor, Rbot, Assembler::bcondNotHigh, ok);
      reentry = __ stop_chain_static(reentry, "IJAVA_STATE.monitors points below monitor block bottom");
      __ bind(ok);
    }
    { NearLabel ok;
      __ compareU64_and_branch(Rcurr_monitor, Z_esp, Assembler::bcondHigh, ok);
      reentry = __ stop_chain_static(reentry, "IJAVA_STATE.monitors above Z_esp");
      __ bind(ok);
    }
#endif

    // Check if the bottom has been reached, i.e. if there is at least one monitor.
    __ compareU64_and_branch(Rcurr_monitor, Rbot, Assembler::bcondEqual, not_found);

    __ bind(loop);
    // Check if the current entry is for the same object.
    __ z_lg(Rlocked_obj, Address(Rcurr_monitor, BasicObjectLock::obj_offset_in_bytes()));
    // If it is the same object, then stop searching.
    __ compareU64_and_branch(Rlocked_obj, Z_tos, Assembler::bcondEqual, found);
    // Otherwise advance to the next entry.
    __ add2reg(Rcurr_monitor, entry_size);
    // Check if the bottom has been reached; if not, then check this entry.
    __ compareU64_and_branch(Rcurr_monitor, Rbot, Assembler::bcondNotEqual, loop);
  }

  __ bind(not_found);
  // Error handling. Unlocking was not block-structured.
  __ call_VM(noreg, CAST_FROM_FN_PTR(address,
             InterpreterRuntime::throw_illegal_monitor_state_exception));
  __ should_not_reach_here();

  __ bind(found);
  __ push_ptr(Z_tos);  // Make sure object is on stack (contract with oopMaps).
  __ unlock_object(Rcurr_monitor, Z_tos);
  __ pop_ptr(Z_tos);   // Discard object.
  BLOCK_COMMENT("} monitorexit");
}

// Wide instructions
void TemplateTable::wide() {
  transition(vtos, vtos);

  __ z_llgc(Z_R1_scratch, at_bcp(1));
  __ z_sllg(Z_R1_scratch, Z_R1_scratch, LogBytesPerWord);
  __ load_absolute_address(Z_tmp_1, (address) Interpreter::_wentry_point);
  __ mem2reg_opt(Z_tmp_1, Address(Z_tmp_1, Z_R1_scratch));
  __ z_br(Z_tmp_1);
  // Note: the bcp increment step is part of the individual wide
  // bytecode implementations.
}

// Multi arrays
void TemplateTable::multianewarray() {
  transition(vtos, atos);

  __ z_llgc(Z_tmp_1, at_bcp(3));  // Get the number of dimensions.
  // Slot count to byte offset.
  __ z_sllg(Z_tmp_1, Z_tmp_1, Interpreter::logStackElementSize);
  // Z_esp points past the last_dim, so set Z_ARG2 to the first_dim address.
  __ load_address(Z_ARG2, Address(Z_esp, Z_tmp_1));
  call_VM(Z_RET,
          CAST_FROM_FN_PTR(address, InterpreterRuntime::multianewarray),
          Z_ARG2);
  // Pop the dimensions from the expression stack.
  __ z_agr(Z_esp, Z_tmp_1);
}
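
// Bytecode-level illustration (for orientation only): "new int[2][3]" compiles to
//   multianewarray #<cpool index of [[I>, 2
// i.e. a 2-byte constant pool index at bcp+1 and the dimension count at bcp+3,
// with the two dimension sizes (2 and 3) pushed on the expression stack. The
// code above passes the address of the first dimension to the runtime and then
// pops all dimension slots.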