/*
 * Copyright (c) 2016, 2017, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2016, 2017 SAP SE. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/interpreterRuntime.hpp"
#include "interpreter/interp_masm.hpp"
#include "interpreter/templateTable.hpp"
#include "memory/universe.inline.hpp"
#include "oops/objArrayKlass.hpp"
#include "oops/oop.inline.hpp"
#include "prims/methodHandles.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/synchronizer.hpp"

#ifdef PRODUCT
#define __ _masm->
#define BLOCK_COMMENT(str)
#define BIND(label)        __ bind(label);
#else
#define __ (PRODUCT_ONLY(false&&)Verbose ? (_masm->block_comment(FILE_AND_LINE),_masm):_masm)->
#define BLOCK_COMMENT(str) __ block_comment(str)
#define BIND(label)        __ bind(label); BLOCK_COMMENT(#label ":")
#endif

// The assumed minimum size of a BranchTableBlock.
// The actual size of each block heavily depends on the CPU capabilities and,
// of course, on the logic implemented in each block.
#ifdef ASSERT
#define BTB_MINSIZE 256
#else
#define BTB_MINSIZE  64
#endif

#ifdef ASSERT
// Macro to open a BranchTableBlock (a piece of code that is branched to by a calculated branch).
#define BTB_BEGIN(lbl, alignment, name)                                  \
  __ align_address(alignment);                                           \
  __ bind(lbl);                                                          \
  { unsigned int b_off = __ offset();                                    \
    uintptr_t   b_addr = (uintptr_t)__ pc();                             \
    __ z_larl(Z_R0, (int64_t)0);          /* Check current address alignment. */ \
    __ z_slgr(Z_R0, br_tab);              /* Current Address must be equal    */ \
    __ z_slgr(Z_R0, flags);               /* to calculated branch target.     */ \
    __ z_brc(Assembler::bcondLogZero, 3); /* skip trap if ok. */         \
    __ z_illtrap(0x55);                                                  \
    guarantee(b_addr%alignment == 0, "bad alignment at begin of block" name);

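// Usage sketch (both the ASSERT and the product variants below are used the
// same way; 'btb_block' is a placeholder name, and br_tab/flags are registers
// the expansion site must provide as the parts of the calculated branch
// target):
//
//   Label btb_block;
//   BTB_BEGIN(btb_block, BTB_MINSIZE, "my_block");
//   // ... at most BTB_MINSIZE bytes of code ...
//   BTB_END(btb_block, BTB_MINSIZE, "my_block");
//
// BTB_BEGIN opens a C++ scope that BTB_END closes, so the macros must always
// be properly paired. In ASSERT builds, BTB_BEGIN additionally emits a
// runtime check that the calculated target (br_tab + flags) equals the
// current address, trapping via z_illtrap(0x55) on mismatch.
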
// Macro to close a BranchTableBlock (a piece of code that is branched to by a calculated branch).
#define BTB_END(lbl, alignment, name)                                    \
    uintptr_t    e_addr = (uintptr_t)__ pc();                            \
    unsigned int e_off  = __ offset();                                   \
    unsigned int len    = e_off-b_off;                                   \
    if (len > alignment) {                                               \
      tty->print_cr("%4d of %4d @ " INTPTR_FORMAT ": Block len for %s",  \
                    len, alignment, e_addr-len, name);                   \
      guarantee(len <= alignment, "block too large");                    \
    }                                                                    \
    guarantee(len == e_addr-b_addr, "block len mismatch");               \
  }
#else
// Macro to open a BranchTableBlock (a piece of code that is branched to by a calculated branch).
#define BTB_BEGIN(lbl, alignment, name)                                  \
  __ align_address(alignment);                                           \
  __ bind(lbl);                                                          \
  { unsigned int b_off = __ offset();                                    \
    uintptr_t   b_addr = (uintptr_t)__ pc();                             \
    guarantee(b_addr%alignment == 0, "bad alignment at begin of block" name);

// Macro to close a BranchTableBlock (a piece of code that is branched to by a calculated branch).
#define BTB_END(lbl, alignment, name)                                    \
    uintptr_t    e_addr = (uintptr_t)__ pc();                            \
    unsigned int e_off  = __ offset();                                   \
    unsigned int len    = e_off-b_off;                                   \
    if (len > alignment) {                                               \
      tty->print_cr("%4d of %4d @ " INTPTR_FORMAT ": Block len for %s",  \
                    len, alignment, e_addr-len, name);                   \
      guarantee(len <= alignment, "block too large");                    \
    }                                                                    \
    guarantee(len == e_addr-b_addr, "block len mismatch");               \
  }
#endif // ASSERT

// Platform-dependent initialization.

void TemplateTable::pd_initialize() {
  // No specific initialization.
}

// Address computation: local variables

static inline Address iaddress(int n) {
  return Address(Z_locals, Interpreter::local_offset_in_bytes(n));
}

static inline Address laddress(int n) {
  return iaddress(n + 1);
}

static inline Address faddress(int n) {
  return iaddress(n);
}

static inline Address daddress(int n) {
  return laddress(n);
}

static inline Address aaddress(int n) {
  return iaddress(n);
}

// Pass NULL if no shift instruction should be emitted.
static inline Address iaddress(InterpreterMacroAssembler *masm, Register r) {
  if (masm) {
    masm->z_sllg(r, r, LogBytesPerWord);  // index2bytes
  }
  return Address(Z_locals, r, Interpreter::local_offset_in_bytes(0));
}

// Pass NULL if no shift instruction should be emitted.
static inline Address laddress(InterpreterMacroAssembler *masm, Register r) {
  if (masm) {
    masm->z_sllg(r, r, LogBytesPerWord);  // index2bytes
  }
  return Address(Z_locals, r, Interpreter::local_offset_in_bytes(1));
}

static inline Address faddress(InterpreterMacroAssembler *masm, Register r) {
  return iaddress(masm, r);
}

static inline Address daddress(InterpreterMacroAssembler *masm, Register r) {
  return laddress(masm, r);
}

static inline Address aaddress(InterpreterMacroAssembler *masm, Register r) {
  return iaddress(masm, r);
}

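// Note on the register-indexed forms above: interpreter locals live at
// decreasing addresses, which is why locals_index() (further down) negates
// the raw index before these helpers scale it. A sketch of the effective
// address computed for local n, assuming the helpers are called with a
// non-NULL masm so the shift is emitted:
//
//   r  = -n                     // locals_index: z_lcgr
//   r  = r << LogBytesPerWord   // iaddress: z_sllg, index -> byte offset
//   ea = Z_locals + r + Interpreter::local_offset_in_bytes(0)
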
// At top of Java expression stack which may be different from esp(). It
// isn't for category 1 objects.
static inline Address at_tos(int slot = 0) {
  return Address(Z_esp, Interpreter::expr_offset_in_bytes(slot));
}

// Condition conversion
static Assembler::branch_condition j_not(TemplateTable::Condition cc) {
  switch (cc) {
    case TemplateTable::equal :
      return Assembler::bcondNotEqual;
    case TemplateTable::not_equal :
      return Assembler::bcondEqual;
    case TemplateTable::less :
      return Assembler::bcondNotLow;
    case TemplateTable::less_equal :
      return Assembler::bcondHigh;
    case TemplateTable::greater :
      return Assembler::bcondNotHigh;
    case TemplateTable::greater_equal:
      return Assembler::bcondLow;
  }
  ShouldNotReachHere();
  return Assembler::bcondZero;
}

// Do an oop store like *(base + offset) = val
// offset can be a register or a constant.
static void do_oop_store(InterpreterMacroAssembler* _masm,
                         Register base,
                         RegisterOrConstant offset,
                         Register val,
                         bool val_is_null, // == false does not guarantee that val really is not NULL.
                         Register tmp1,    // If tmp3 is volatile, either tmp1 or tmp2 must be
                         Register tmp2,    // non-volatile to hold a copy of pre_val across runtime calls.
                         Register tmp3,    // Ideally, this tmp register is non-volatile, as it is used to
                                           // hold pre_val (must survive runtime calls).
                         BarrierSet::Name barrier,
                         bool precise) {
  BLOCK_COMMENT("do_oop_store {");
  assert(val != noreg, "val must always be valid, even if it is zero");
  assert_different_registers(tmp1, tmp2, tmp3, val, base, offset.register_or_noreg());
  __ verify_oop(val);
  switch (barrier) {
#if INCLUDE_ALL_GCS
    case BarrierSet::G1SATBCTLogging:
      {
#ifdef ASSERT
        if (val_is_null) { // Check if the flag setting reflects reality.
          Label OK;
          __ z_ltgr(val, val);
          __ z_bre(OK);
          __ z_illtrap(0x11);
          __ bind(OK);
        }
#endif
        Register pre_val = tmp3;
        // Load and record the previous value.
        __ g1_write_barrier_pre(base, offset, pre_val, val,
                                tmp1, tmp2,
                                false);  // Needs to hold pre_val in non_volatile register?

        if (val_is_null) {
          __ store_heap_oop_null(val, offset, base);
        } else {
          Label Done;
          // val_is_null == false does not guarantee that val really is not NULL.
          // Checking for this case dynamically has some cost, but also some benefit (in GC).
          // It's hard to say if cost or benefit is greater.
          { Label OK;
            __ z_ltgr(val, val);
            __ z_brne(OK);
            __ store_heap_oop_null(val, offset, base);
            __ z_bru(Done);
            __ bind(OK);
          }
          // G1 barrier needs uncompressed oop for region cross check.
          // Store_heap_oop compresses the oop in the argument register.
          Register val_work = val;
          if (UseCompressedOops) {
            val_work = tmp3;
            __ z_lgr(val_work, val);
          }
          __ store_heap_oop_not_null(val_work, offset, base);

          // We need precise card marks for oop array stores.
          // Otherwise, cardmarking the object which contains the oop is sufficient.
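          // Illustration of the precise/imprecise distinction (assuming
          // standard HotSpot card-marking semantics): for an aastore into
          // a[i], the card covering &a[i] itself must be dirtied, because
          // only that element changed. For a putfield it is enough to dirty
          // the card covering the object's base address; the object is then
          // scanned as a whole.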
          if (precise && !(offset.is_constant() && offset.as_constant() == 0)) {
            __ add2reg_with_index(base,
                                  offset.constant_or_zero(),
                                  offset.register_or_noreg(),
                                  base);
          }
          __ g1_write_barrier_post(base /* store_adr */, val, tmp1, tmp2, tmp3);
          __ bind(Done);
        }
      }
      break;
#endif // INCLUDE_ALL_GCS
    case BarrierSet::CardTableForRS:
    case BarrierSet::CardTableExtension:
      {
        if (val_is_null) {
          __ store_heap_oop_null(val, offset, base);
        } else {
          __ store_heap_oop(val, offset, base);
          // Flatten object address if needed.
          if (precise && ((offset.register_or_noreg() != noreg) || (offset.constant_or_zero() != 0))) {
            __ load_address(base, Address(base, offset.register_or_noreg(), offset.constant_or_zero()));
          }
          __ card_write_barrier_post(base, tmp1);
        }
      }
      break;
    case BarrierSet::ModRef:
      // fall through
    default:
      ShouldNotReachHere();
  }
  BLOCK_COMMENT("} do_oop_store");
}

Address TemplateTable::at_bcp(int offset) {
  assert(_desc->uses_bcp(), "inconsistent uses_bcp information");
  return Address(Z_bcp, offset);
}

void TemplateTable::patch_bytecode(Bytecodes::Code bc,
                                   Register        bc_reg,
                                   Register        temp_reg,
                                   bool            load_bc_into_bc_reg, // = true
                                   int             byte_no) {
  if (!RewriteBytecodes) { return; }

  NearLabel L_patch_done;
  BLOCK_COMMENT("patch_bytecode {");

  switch (bc) {
    case Bytecodes::_fast_aputfield:
    case Bytecodes::_fast_bputfield:
    case Bytecodes::_fast_zputfield:
    case Bytecodes::_fast_cputfield:
    case Bytecodes::_fast_dputfield:
    case Bytecodes::_fast_fputfield:
    case Bytecodes::_fast_iputfield:
    case Bytecodes::_fast_lputfield:
    case Bytecodes::_fast_sputfield:
      {
        // We skip bytecode quickening for putfield instructions when
        // the put_code written to the constant pool cache is zero.
        // This is required so that every execution of this instruction
        // calls out to InterpreterRuntime::resolve_get_put to do
        // additional, required work.
        assert(byte_no == f1_byte || byte_no == f2_byte, "byte_no out of range");
        assert(load_bc_into_bc_reg, "we use bc_reg as temp");
        __ get_cache_and_index_and_bytecode_at_bcp(Z_R1_scratch, bc_reg,
                                                   temp_reg, byte_no, 1);
        __ load_const_optimized(bc_reg, bc);
        __ compareU32_and_branch(temp_reg, (intptr_t)0,
                                 Assembler::bcondZero, L_patch_done);
      }
      break;
    default:
      assert(byte_no == -1, "sanity");
      // The pair bytecodes have already done the load.
      if (load_bc_into_bc_reg) {
        __ load_const_optimized(bc_reg, bc);
      }
      break;
  }

  if (JvmtiExport::can_post_breakpoint()) {

    Label L_fast_patch;

    // If a breakpoint is present we can't rewrite the stream directly.
    __ z_cli(at_bcp(0), Bytecodes::_breakpoint);
    __ z_brne(L_fast_patch);
    __ get_method(temp_reg);
    // Let breakpoint table handling rewrite to quicker bytecode.
    __ call_VM_static(noreg,
                      CAST_FROM_FN_PTR(address, InterpreterRuntime::set_original_bytecode_at),
                      temp_reg, Z_R13, bc_reg);
    __ z_bru(L_patch_done);

    __ bind(L_fast_patch);
  }

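  // What follows verifies (in ASSERT builds only) and then performs the
  // actual rewrite. Note that the patch is a single byte store (z_stc):
  // racing interpreter threads therefore observe either the old or the new
  // bytecode, both of which must be valid at this bci.
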
#ifdef ASSERT
  NearLabel L_okay;

  // We load into 64 bits, since this works on any CPU.
  __ z_llgc(temp_reg, at_bcp(0));
  __ compareU32_and_branch(temp_reg, Bytecodes::java_code(bc),
                           Assembler::bcondEqual, L_okay);
  __ compareU32_and_branch(temp_reg, bc_reg, Assembler::bcondEqual, L_okay);
  __ stop_static("patching the wrong bytecode");
  __ bind(L_okay);
#endif

  // Patch bytecode.
  __ z_stc(bc_reg, at_bcp(0));

  __ bind(L_patch_done);
  BLOCK_COMMENT("} patch_bytecode");
}

// Individual instructions

void TemplateTable::nop() {
  transition(vtos, vtos);
}

void TemplateTable::shouldnotreachhere() {
  transition(vtos, vtos);
  __ stop("shouldnotreachhere bytecode");
}

void TemplateTable::aconst_null() {
  transition(vtos, atos);
  __ clear_reg(Z_tos, true, false);
}

void TemplateTable::iconst(int value) {
  transition(vtos, itos);
  // Zero extension of the iconst makes zero extension at runtime obsolete.
  __ load_const_optimized(Z_tos, ((unsigned long)(unsigned int)value));
}

void TemplateTable::lconst(int value) {
  transition(vtos, ltos);
  __ load_const_optimized(Z_tos, value);
}

// No pc-relative load/store for floats.
void TemplateTable::fconst(int value) {
  transition(vtos, ftos);
  static float one = 1.0f, two = 2.0f;

  switch (value) {
    case 0:
      __ z_lzer(Z_ftos);
      return;
    case 1:
      __ load_absolute_address(Z_R1_scratch, (address) &one);
      __ mem2freg_opt(Z_ftos, Address(Z_R1_scratch), false);
      return;
    case 2:
      __ load_absolute_address(Z_R1_scratch, (address) &two);
      __ mem2freg_opt(Z_ftos, Address(Z_R1_scratch), false);
      return;
    default:
      ShouldNotReachHere();
      return;
  }
}

void TemplateTable::dconst(int value) {
  transition(vtos, dtos);
  static double one = 1.0;

  switch (value) {
    case 0:
      __ z_lzdr(Z_ftos);
      return;
    case 1:
      __ load_absolute_address(Z_R1_scratch, (address) &one);
      __ mem2freg_opt(Z_ftos, Address(Z_R1_scratch));
      return;
    default:
      ShouldNotReachHere();
      return;
  }
}

void TemplateTable::bipush() {
  transition(vtos, itos);
  __ z_lb(Z_tos, at_bcp(1));
}

void TemplateTable::sipush() {
  transition(vtos, itos);
  __ get_2_byte_integer_at_bcp(Z_tos, 1, InterpreterMacroAssembler::Signed);
}


void TemplateTable::ldc(bool wide) {
  transition(vtos, vtos);
  Label call_ldc, notFloat, notClass, Done;
  const Register RcpIndex = Z_tmp_1;
  const Register Rtags    = Z_ARG2;

  if (wide) {
    __ get_2_byte_integer_at_bcp(RcpIndex, 1, InterpreterMacroAssembler::Unsigned);
  } else {
    __ z_llgc(RcpIndex, at_bcp(1));
  }

  __ get_cpool_and_tags(Z_tmp_2, Rtags);

  const int base_offset = ConstantPool::header_size() * wordSize;
  const int tags_offset = Array<u1>::base_offset_in_bytes();
  const Register Raddr_type = Rtags;

  // Get address of type.
  __ add2reg_with_index(Raddr_type, tags_offset, RcpIndex, Rtags);

  __ z_cli(0, Raddr_type, JVM_CONSTANT_UnresolvedClass);
  __ z_bre(call_ldc);    // Unresolved class - get the resolved class.

  __ z_cli(0, Raddr_type, JVM_CONSTANT_UnresolvedClassInError);
  __ z_bre(call_ldc);    // Unresolved class in error state - call into runtime
                         // to throw the error from the first resolution attempt.

  __ z_cli(0, Raddr_type, JVM_CONSTANT_Class);
  __ z_brne(notClass);   // Resolved class - need to call vm to get java
                         // mirror of the class.
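
  // Summary of the tag dispatch so far: unresolved classes and classes in
  // error state branch to call_ldc below; a resolved class also falls
  // through into call_ldc, since obtaining the java mirror requires a VM
  // call either way. Float and Integer entries are handled inline further
  // down. String and other object constants do not reach this template;
  // they are rewritten to _fast_aldc during bytecode rewriting (see
  // fast_aldc() below).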

  // We deal with a class. Call the VM to do the appropriate work.
  __ bind(call_ldc);
  __ load_const_optimized(Z_ARG2, wide);
  call_VM(Z_RET, CAST_FROM_FN_PTR(address, InterpreterRuntime::ldc), Z_ARG2);
  __ push_ptr(Z_RET);
  __ z_bru(Done);

  // Not a class.
  __ bind(notClass);
  Register RcpOffset = RcpIndex;
  __ z_sllg(RcpOffset, RcpIndex, LogBytesPerWord); // Convert index to offset.
  __ z_cli(0, Raddr_type, JVM_CONSTANT_Float);
  __ z_brne(notFloat);

  // ftos
  __ mem2freg_opt(Z_ftos, Address(Z_tmp_2, RcpOffset, base_offset), false);
  __ push_f();
  __ z_bru(Done);

  __ bind(notFloat);
#ifdef ASSERT
  {
    Label L;

    __ z_cli(0, Raddr_type, JVM_CONSTANT_Integer);
    __ z_bre(L);
    // String and Object are rewritten to fast_aldc.
    __ stop("unexpected tag type in ldc");

    __ bind(L);
  }
#endif

  // itos
  __ mem2reg_opt(Z_tos, Address(Z_tmp_2, RcpOffset, base_offset), false);
  __ push_i(Z_tos);

  __ bind(Done);
}

// Fast path for caching oop constants.
// %%% We should use this to handle Class and String constants also.
// %%% It will simplify the ldc/primitive path considerably.
void TemplateTable::fast_aldc(bool wide) {
  transition(vtos, atos);

  const Register index = Z_tmp_2;
  int            index_size = wide ? sizeof(u2) : sizeof(u1);
  Label          L_resolved;

  // We are resolved if the resolved reference cache entry contains a
  // non-null object (CallSite, etc.).
  __ get_cache_index_at_bcp(index, 1, index_size);  // Load index.
  __ load_resolved_reference_at_index(Z_tos, index);
  __ z_ltgr(Z_tos, Z_tos);
  __ z_brne(L_resolved);

  // First time invocation - must resolve first.
  address entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_ldc);
  __ load_const_optimized(Z_ARG1, (int)bytecode());
  __ call_VM(Z_tos, entry, Z_ARG1);

  __ bind(L_resolved);
  __ verify_oop(Z_tos);
}

void TemplateTable::ldc2_w() {
  transition(vtos, vtos);
  Label Long, Done;

  // Z_tmp_1 = index of cp entry
  __ get_2_byte_integer_at_bcp(Z_tmp_1, 1, InterpreterMacroAssembler::Unsigned);

  __ get_cpool_and_tags(Z_tmp_2, Z_tos);

  const int base_offset = ConstantPool::header_size() * wordSize;
  const int tags_offset = Array<u1>::base_offset_in_bytes();

  // Get address of type.
  __ add2reg_with_index(Z_tos, tags_offset, Z_tos, Z_tmp_1);

  // Index needed in both branches, so calculate here.
  __ z_sllg(Z_tmp_1, Z_tmp_1, LogBytesPerWord);  // index2bytes

  // Check type.
  __ z_cli(0, Z_tos, JVM_CONSTANT_Double);
  __ z_brne(Long);

  // dtos
  __ mem2freg_opt(Z_ftos, Address(Z_tmp_2, Z_tmp_1, base_offset));
  __ push_d();
  __ z_bru(Done);

  __ bind(Long);
  // ltos
  __ mem2reg_opt(Z_tos, Address(Z_tmp_2, Z_tmp_1, base_offset));
  __ push_l();

  __ bind(Done);
}

void TemplateTable::locals_index(Register reg, int offset) {
  __ z_llgc(reg, at_bcp(offset));
  __ z_lcgr(reg);
}

void TemplateTable::iload() {
  iload_internal();
}

void TemplateTable::nofast_iload() {
  iload_internal(may_not_rewrite);
}

void TemplateTable::iload_internal(RewriteControl rc) {
  transition(vtos, itos);

  if (RewriteFrequentPairs && rc == may_rewrite) {
    NearLabel rewrite, done;
    const Register bc = Z_ARG4;

    assert(Z_R1_scratch != bc, "register damaged");

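    // Rewriting scheme (sketch): in a pair "iload a; iload b" the second
    // iload is rewritten to _fast_iload first (its successor is neither an
    // iload nor a caload). A later execution of the first iload then sees
    // _fast_iload as its successor and becomes _fast_iload2, which pushes
    // both locals. Likewise, "iload a; caload" becomes _fast_icaload.
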
    // Get next byte.
    __ z_llgc(Z_R1_scratch, at_bcp(Bytecodes::length_for(Bytecodes::_iload)));

    // If _iload, wait to rewrite to _fast_iload2. We only want to rewrite the
    // last two iloads in a pair. Comparing against fast_iload means that
    // the next bytecode is neither an iload nor a caload, and therefore
    // an iload pair.
    __ compareU32_and_branch(Z_R1_scratch, Bytecodes::_iload,
                             Assembler::bcondEqual, done);

    __ load_const_optimized(bc, Bytecodes::_fast_iload2);
    __ compareU32_and_branch(Z_R1_scratch, Bytecodes::_fast_iload,
                             Assembler::bcondEqual, rewrite);

    // If _caload, rewrite to fast_icaload.
    __ load_const_optimized(bc, Bytecodes::_fast_icaload);
    __ compareU32_and_branch(Z_R1_scratch, Bytecodes::_caload,
                             Assembler::bcondEqual, rewrite);

    // Rewrite so iload doesn't check again.
    __ load_const_optimized(bc, Bytecodes::_fast_iload);

    // rewrite
    // bc: fast bytecode
    __ bind(rewrite);
    patch_bytecode(Bytecodes::_iload, bc, Z_R1_scratch, false);

    __ bind(done);

  }

  // Get the local value into tos.
  locals_index(Z_R1_scratch);
  __ mem2reg_opt(Z_tos, iaddress(_masm, Z_R1_scratch), false);
}

void TemplateTable::fast_iload2() {
  transition(vtos, itos);

  locals_index(Z_R1_scratch);
  __ mem2reg_opt(Z_tos, iaddress(_masm, Z_R1_scratch), false);
  __ push_i(Z_tos);
  locals_index(Z_R1_scratch, 3);
  __ mem2reg_opt(Z_tos, iaddress(_masm, Z_R1_scratch), false);
}

void TemplateTable::fast_iload() {
  transition(vtos, itos);

  locals_index(Z_R1_scratch);
  __ mem2reg_opt(Z_tos, iaddress(_masm, Z_R1_scratch), false);
}

void TemplateTable::lload() {
  transition(vtos, ltos);

  locals_index(Z_R1_scratch);
  __ mem2reg_opt(Z_tos, laddress(_masm, Z_R1_scratch));
}

void TemplateTable::fload() {
  transition(vtos, ftos);

  locals_index(Z_R1_scratch);
  __ mem2freg_opt(Z_ftos, faddress(_masm, Z_R1_scratch), false);
}

void TemplateTable::dload() {
  transition(vtos, dtos);

  locals_index(Z_R1_scratch);
  __ mem2freg_opt(Z_ftos, daddress(_masm, Z_R1_scratch));
}

void TemplateTable::aload() {
  transition(vtos, atos);

  locals_index(Z_R1_scratch);
  __ mem2reg_opt(Z_tos, aaddress(_masm, Z_R1_scratch));
}

void TemplateTable::locals_index_wide(Register reg) {
  __ get_2_byte_integer_at_bcp(reg, 2, InterpreterMacroAssembler::Unsigned);
  __ z_lcgr(reg);
}

void TemplateTable::wide_iload() {
  transition(vtos, itos);

  locals_index_wide(Z_tmp_1);
  __ mem2reg_opt(Z_tos, iaddress(_masm, Z_tmp_1), false);
}

void TemplateTable::wide_lload() {
  transition(vtos, ltos);

  locals_index_wide(Z_tmp_1);
  __ mem2reg_opt(Z_tos, laddress(_masm, Z_tmp_1));
}

void TemplateTable::wide_fload() {
  transition(vtos, ftos);

  locals_index_wide(Z_tmp_1);
  __ mem2freg_opt(Z_ftos, faddress(_masm, Z_tmp_1), false);
}

void TemplateTable::wide_dload() {
  transition(vtos, dtos);

  locals_index_wide(Z_tmp_1);
  __ mem2freg_opt(Z_ftos, daddress(_masm, Z_tmp_1));
}

void TemplateTable::wide_aload() {
  transition(vtos, atos);

  locals_index_wide(Z_tmp_1);
  __ mem2reg_opt(Z_tos, aaddress(_masm, Z_tmp_1));
}

void TemplateTable::index_check(Register array, Register index, unsigned int shift) {
  assert_different_registers(Z_R1_scratch, array, index);

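  // Contract (derived from the code below): performs the implicit null
  // check on the array, then an unsigned compare (z_cl) of the index
  // against the array length, so a negative index fails the check as well.
  // On failure, the index is passed in Z_ARG3 and the array in Z_ARG2 to
  // the ArrayIndexOutOfBounds handler, which uses them to build a more
  // detailed exception message. On success, the index is optionally scaled
  // by 'shift' for subsequent address computation.
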
  // Check array.
  __ null_check(array, Z_R0_scratch, arrayOopDesc::length_offset_in_bytes());

  // Sign extend index for use by indexed load.
  __ z_lgfr(index, index);

  // Check index.
  Label index_ok;
  __ z_cl(index, Address(array, arrayOopDesc::length_offset_in_bytes()));
  __ z_brl(index_ok);
  __ lgr_if_needed(Z_ARG3, index); // See generate_ArrayIndexOutOfBounds_handler().
  // Give back the array to create more detailed exceptions.
  __ lgr_if_needed(Z_ARG2, array); // See generate_ArrayIndexOutOfBounds_handler().
  __ load_absolute_address(Z_R1_scratch,
                           Interpreter::_throw_ArrayIndexOutOfBoundsException_entry);
  __ z_bcr(Assembler::bcondAlways, Z_R1_scratch);
  __ bind(index_ok);

  if (shift > 0)
    __ z_sllg(index, index, shift);
}

void TemplateTable::iaload() {
  transition(itos, itos);

  __ pop_ptr(Z_tmp_1);  // array
  // Index is in Z_tos.
  Register index = Z_tos;
  index_check(Z_tmp_1, index, LogBytesPerInt); // Kills Z_ARG3.
  // Load the value.
  __ mem2reg_opt(Z_tos,
                 Address(Z_tmp_1, index, arrayOopDesc::base_offset_in_bytes(T_INT)),
                 false);
}

void TemplateTable::laload() {
  transition(itos, ltos);

  __ pop_ptr(Z_tmp_2);
  // Z_tos   : index
  // Z_tmp_2 : array
  Register index = Z_tos;
  index_check(Z_tmp_2, index, LogBytesPerLong);
  __ mem2reg_opt(Z_tos,
                 Address(Z_tmp_2, index, arrayOopDesc::base_offset_in_bytes(T_LONG)));
}

void TemplateTable::faload() {
  transition(itos, ftos);

  __ pop_ptr(Z_tmp_2);
  // Z_tos   : index
  // Z_tmp_2 : array
  Register index = Z_tos;
  index_check(Z_tmp_2, index, LogBytesPerInt);
  __ mem2freg_opt(Z_ftos,
                  Address(Z_tmp_2, index, arrayOopDesc::base_offset_in_bytes(T_FLOAT)),
                  false);
}

void TemplateTable::daload() {
  transition(itos, dtos);

  __ pop_ptr(Z_tmp_2);
  // Z_tos   : index
  // Z_tmp_2 : array
  Register index = Z_tos;
  index_check(Z_tmp_2, index, LogBytesPerLong);
  __ mem2freg_opt(Z_ftos,
                  Address(Z_tmp_2, index, arrayOopDesc::base_offset_in_bytes(T_DOUBLE)));
}

void TemplateTable::aaload() {
  transition(itos, atos);

  unsigned const int shift = LogBytesPerHeapOop;
  __ pop_ptr(Z_tmp_1);  // array
  // Index is in Z_tos.
  Register index = Z_tos;
  index_check(Z_tmp_1, index, shift);
  // Now load array element.
  __ load_heap_oop(Z_tos,
                   Address(Z_tmp_1, index, arrayOopDesc::base_offset_in_bytes(T_OBJECT)));
  __ verify_oop(Z_tos);
}

void TemplateTable::baload() {
  transition(itos, itos);

  __ pop_ptr(Z_tmp_1);
  // Z_tos   : index
  // Z_tmp_1 : array
  Register index = Z_tos;
  index_check(Z_tmp_1, index, 0);
  __ z_lb(Z_tos,
          Address(Z_tmp_1, index, arrayOopDesc::base_offset_in_bytes(T_BYTE)));
}

void TemplateTable::caload() {
  transition(itos, itos);

  __ pop_ptr(Z_tmp_2);
  // Z_tos   : index
  // Z_tmp_2 : array
  Register index = Z_tos;
  index_check(Z_tmp_2, index, LogBytesPerShort);
  // Load into 64 bits, works on all CPUs.
  __ z_llgh(Z_tos,
            Address(Z_tmp_2, index, arrayOopDesc::base_offset_in_bytes(T_CHAR)));
}

// Iload followed by caload frequent pair.
void TemplateTable::fast_icaload() {
  transition(vtos, itos);

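  // This template implements the fused pair "iload <n>; caload" created by
  // iload_internal() above: the operand byte of the rewritten instruction
  // still names the local holding the index, while the array reference is
  // taken from the expression stack.
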
  // Load index out of locals.
  locals_index(Z_R1_scratch);
  __ mem2reg_opt(Z_ARG3, iaddress(_masm, Z_R1_scratch), false);
  // Z_ARG3  : index
  // Z_tmp_2 : array
  __ pop_ptr(Z_tmp_2);
  index_check(Z_tmp_2, Z_ARG3, LogBytesPerShort);
  // Load into 64 bits, works on all CPUs.
  __ z_llgh(Z_tos,
            Address(Z_tmp_2, Z_ARG3, arrayOopDesc::base_offset_in_bytes(T_CHAR)));
}

void TemplateTable::saload() {
  transition(itos, itos);

  __ pop_ptr(Z_tmp_2);
  // Z_tos   : index
  // Z_tmp_2 : array
  Register index = Z_tos;
  index_check(Z_tmp_2, index, LogBytesPerShort);
  __ z_lh(Z_tos,
          Address(Z_tmp_2, index, arrayOopDesc::base_offset_in_bytes(T_SHORT)));
}

void TemplateTable::iload(int n) {
  transition(vtos, itos);
  __ z_ly(Z_tos, iaddress(n));
}

void TemplateTable::lload(int n) {
  transition(vtos, ltos);
  __ z_lg(Z_tos, laddress(n));
}

void TemplateTable::fload(int n) {
  transition(vtos, ftos);
  __ mem2freg_opt(Z_ftos, faddress(n), false);
}

void TemplateTable::dload(int n) {
  transition(vtos, dtos);
  __ mem2freg_opt(Z_ftos, daddress(n));
}

void TemplateTable::aload(int n) {
  transition(vtos, atos);
  __ mem2reg_opt(Z_tos, aaddress(n));
}

void TemplateTable::aload_0() {
  aload_0_internal();
}

void TemplateTable::nofast_aload_0() {
  aload_0_internal(may_not_rewrite);
}

void TemplateTable::aload_0_internal(RewriteControl rc) {
  transition(vtos, atos);

  // According to bytecode histograms, the pairs:
  //
  // _aload_0, _fast_igetfield
  // _aload_0, _fast_agetfield
  // _aload_0, _fast_fgetfield
  //
  // occur frequently. If RewriteFrequentPairs is set, the (slow)
  // _aload_0 bytecode checks if the next bytecode is either
  // _fast_igetfield, _fast_agetfield or _fast_fgetfield and then
  // rewrites the current bytecode into a pair bytecode; otherwise it
  // rewrites the current bytecode into _fast_aload_0 that doesn't do
  // the pair check anymore.
  //
  // Note: If the next bytecode is _getfield, the rewrite must be
  // delayed, otherwise we may miss an opportunity for a pair.
  //
  // Also rewrite the frequent pairs
  //   aload_0, aload_1
  //   aload_0, iload_1
  // These pairs require only a small amount of code, which makes them
  // the most profitable ones to rewrite.
  if (!(RewriteFrequentPairs && (rc == may_rewrite))) {
    aload(0);
    return;
  }

  NearLabel rewrite, done;
  const Register bc = Z_ARG4;

  assert(Z_R1_scratch != bc, "register damaged");
  // Get next byte.
  __ z_llgc(Z_R1_scratch, at_bcp(Bytecodes::length_for(Bytecodes::_aload_0)));

  // Do actual aload_0.
  aload(0);

  // If _getfield then wait with rewrite.
  __ compareU32_and_branch(Z_R1_scratch, Bytecodes::_getfield,
                           Assembler::bcondEqual, done);

  // If _igetfield then rewrite to _fast_iaccess_0.
  assert(Bytecodes::java_code(Bytecodes::_fast_iaccess_0)
         == Bytecodes::_aload_0, "fix bytecode definition");

  __ load_const_optimized(bc, Bytecodes::_fast_iaccess_0);
  __ compareU32_and_branch(Z_R1_scratch, Bytecodes::_fast_igetfield,
                           Assembler::bcondEqual, rewrite);

  // If _agetfield then rewrite to _fast_aaccess_0.
  assert(Bytecodes::java_code(Bytecodes::_fast_aaccess_0)
         == Bytecodes::_aload_0, "fix bytecode definition");

  __ load_const_optimized(bc, Bytecodes::_fast_aaccess_0);
  __ compareU32_and_branch(Z_R1_scratch, Bytecodes::_fast_agetfield,
                           Assembler::bcondEqual, rewrite);

  // If _fgetfield then rewrite to _fast_faccess_0.
  assert(Bytecodes::java_code(Bytecodes::_fast_faccess_0)
         == Bytecodes::_aload_0, "fix bytecode definition");

  __ load_const_optimized(bc, Bytecodes::_fast_faccess_0);
  __ compareU32_and_branch(Z_R1_scratch, Bytecodes::_fast_fgetfield,
                           Assembler::bcondEqual, rewrite);

  // Else rewrite to _fast_aload0.
  assert(Bytecodes::java_code(Bytecodes::_fast_aload_0)
         == Bytecodes::_aload_0, "fix bytecode definition");
  __ load_const_optimized(bc, Bytecodes::_fast_aload_0);

  // rewrite
  // bc: fast bytecode
  __ bind(rewrite);

  patch_bytecode(Bytecodes::_aload_0, bc, Z_R1_scratch, false);
  // Reload local 0 because of the VM call inside patch_bytecode().
  // It may trigger GC and thus change the oop.
  aload(0);

  __ bind(done);
}

void TemplateTable::istore() {
  transition(itos, vtos);
  locals_index(Z_R1_scratch);
  __ reg2mem_opt(Z_tos, iaddress(_masm, Z_R1_scratch), false);
}

void TemplateTable::lstore() {
  transition(ltos, vtos);
  locals_index(Z_R1_scratch);
  __ reg2mem_opt(Z_tos, laddress(_masm, Z_R1_scratch));
}

void TemplateTable::fstore() {
  transition(ftos, vtos);
  locals_index(Z_R1_scratch);
  __ freg2mem_opt(Z_ftos, faddress(_masm, Z_R1_scratch));
}

void TemplateTable::dstore() {
  transition(dtos, vtos);
  locals_index(Z_R1_scratch);
  __ freg2mem_opt(Z_ftos, daddress(_masm, Z_R1_scratch));
}

void TemplateTable::astore() {
  transition(vtos, vtos);
  __ pop_ptr(Z_tos);
  locals_index(Z_R1_scratch);
  __ reg2mem_opt(Z_tos, aaddress(_masm, Z_R1_scratch));
}

void TemplateTable::wide_istore() {
  transition(vtos, vtos);
  __ pop_i(Z_tos);
  locals_index_wide(Z_tmp_1);
  __ reg2mem_opt(Z_tos, iaddress(_masm, Z_tmp_1), false);
}

void TemplateTable::wide_lstore() {
  transition(vtos, vtos);
  __ pop_l(Z_tos);
  locals_index_wide(Z_tmp_1);
  __ reg2mem_opt(Z_tos, laddress(_masm, Z_tmp_1));
}

void TemplateTable::wide_fstore() {
  transition(vtos, vtos);
  __ pop_f(Z_ftos);
  locals_index_wide(Z_tmp_1);
  __ freg2mem_opt(Z_ftos, faddress(_masm, Z_tmp_1), false);
}

void TemplateTable::wide_dstore() {
  transition(vtos, vtos);
  __ pop_d(Z_ftos);
  locals_index_wide(Z_tmp_1);
  __ freg2mem_opt(Z_ftos, daddress(_masm, Z_tmp_1));
}

void TemplateTable::wide_astore() {
  transition(vtos, vtos);
  __ pop_ptr(Z_tos);
  locals_index_wide(Z_tmp_1);
  __ reg2mem_opt(Z_tos, aaddress(_masm, Z_tmp_1));
}

void TemplateTable::iastore() {
  transition(itos, vtos);

  Register index = Z_ARG3; // index_check() expects the index in Z_ARG3.
  // Value is in Z_tos ...
  __ pop_i(index);        // index
  __ pop_ptr(Z_tmp_1);    // array
  index_check(Z_tmp_1, index, LogBytesPerInt);
  // ... and then move the value.
  __ reg2mem_opt(Z_tos,
                 Address(Z_tmp_1, index, arrayOopDesc::base_offset_in_bytes(T_INT)),
                 false);
}

void TemplateTable::lastore() {
  transition(ltos, vtos);

  __ pop_i(Z_ARG3);
  __ pop_ptr(Z_tmp_2);
  // Z_tos   : value
  // Z_ARG3  : index
  // Z_tmp_2 : array
  index_check(Z_tmp_2, Z_ARG3, LogBytesPerLong); // Prefer index in Z_ARG3.
  __ reg2mem_opt(Z_tos,
                 Address(Z_tmp_2, Z_ARG3, arrayOopDesc::base_offset_in_bytes(T_LONG)));
}

void TemplateTable::fastore() {
  transition(ftos, vtos);

  __ pop_i(Z_ARG3);
  __ pop_ptr(Z_tmp_2);
  // Z_ftos  : value
  // Z_ARG3  : index
  // Z_tmp_2 : array
  index_check(Z_tmp_2, Z_ARG3, LogBytesPerInt); // Prefer index in Z_ARG3.
  __ freg2mem_opt(Z_ftos,
                  Address(Z_tmp_2, Z_ARG3, arrayOopDesc::base_offset_in_bytes(T_FLOAT)),
                  false);
}

void TemplateTable::dastore() {
  transition(dtos, vtos);

  __ pop_i(Z_ARG3);
  __ pop_ptr(Z_tmp_2);
  // Z_ftos  : value
  // Z_ARG3  : index
  // Z_tmp_2 : array
  index_check(Z_tmp_2, Z_ARG3, LogBytesPerLong); // Prefer index in Z_ARG3.
  __ freg2mem_opt(Z_ftos,
                  Address(Z_tmp_2, Z_ARG3, arrayOopDesc::base_offset_in_bytes(T_DOUBLE)));
}

void TemplateTable::aastore() {
  NearLabel is_null, ok_is_subtype, done;
  transition(vtos, vtos);

  // stack: ..., array, index, value

  Register Rvalue = Z_tos;
  Register Rarray = Z_ARG2;
  Register Rindex = Z_ARG3; // Convention for index_check().

  __ load_ptr(0, Rvalue);
  __ z_l(Rindex, Address(Z_esp, Interpreter::expr_offset_in_bytes(1)));
  __ load_ptr(2, Rarray);

  unsigned const int shift = LogBytesPerHeapOop;
  index_check(Rarray, Rindex, shift); // side effect: Rindex = Rindex << shift
  Register Rstore_addr = Rindex;
  // Address where the store goes to, i.e. &(Rarray[index])
  __ load_address(Rstore_addr, Address(Rarray, Rindex, arrayOopDesc::base_offset_in_bytes(T_OBJECT)));

  // Do the array store check - check for a NULL value first.
  __ compareU64_and_branch(Rvalue, (intptr_t)0, Assembler::bcondEqual, is_null);

  Register Rsub_klass   = Z_ARG4;
  Register Rsuper_klass = Z_ARG5;
  __ load_klass(Rsub_klass, Rvalue);
  // Load superklass.
  __ load_klass(Rsuper_klass, Rarray);
  __ z_lg(Rsuper_klass, Address(Rsuper_klass, ObjArrayKlass::element_klass_offset()));

  // Generate a fast subtype check. Branch to ok_is_subtype if no failure.
  // Throw if failure.
  Register tmp1 = Z_tmp_1;
  Register tmp2 = Z_tmp_2;
  __ gen_subtype_check(Rsub_klass, Rsuper_klass, tmp1, tmp2, ok_is_subtype);

  // Fall through on failure.
  // Object is in Rvalue == Z_tos.
  assert(Rvalue == Z_tos, "that's the expected location");
  __ load_absolute_address(tmp1, Interpreter::_throw_ArrayStoreException_entry);
  __ z_br(tmp1);

  // Come here on success.
  __ bind(ok_is_subtype);

  // Now store using the appropriate barrier.
  Register tmp3 = Rsub_klass;
  do_oop_store(_masm, Rstore_addr, (intptr_t)0/*offset*/, Rvalue, false/*val==null*/,
               tmp3, tmp2, tmp1, _bs->kind(), true);
  __ z_bru(done);

  // Have a NULL in Rvalue.
  __ bind(is_null);
  __ profile_null_seen(tmp1);

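  // Storing a null reference can never violate the array's element type,
  // so the subtype check is skipped entirely on this path; only the store
  // itself and the card mark (via do_oop_store with val_is_null == true)
  // remain.
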
  // Store a NULL.
  do_oop_store(_masm, Rstore_addr, (intptr_t)0/*offset*/, Rvalue, true/*val==null*/,
               tmp3, tmp2, tmp1, _bs->kind(), true);

  // Pop stack arguments.
  __ bind(done);
  __ add2reg(Z_esp, 3 * Interpreter::stackElementSize);
}


void TemplateTable::bastore() {
  transition(itos, vtos);

  __ pop_i(Z_ARG3);
  __ pop_ptr(Z_tmp_2);
  // Z_tos   : value
  // Z_ARG3  : index
  // Z_tmp_2 : array

  // Need to check whether array is boolean or byte
  // since both types share the bastore bytecode.
  __ load_klass(Z_tmp_1, Z_tmp_2);
  __ z_llgf(Z_tmp_1, Address(Z_tmp_1, Klass::layout_helper_offset()));
  __ z_tmll(Z_tmp_1, Klass::layout_helper_boolean_diffbit());
  Label L_skip;
  __ z_bfalse(L_skip);
  // If it is a T_BOOLEAN array, mask the stored value to 0/1.
  __ z_nilf(Z_tos, 0x1);
  __ bind(L_skip);

  // No index shift necessary - pass 0.
  index_check(Z_tmp_2, Z_ARG3, 0); // Prefer index in Z_ARG3.
  __ z_stc(Z_tos,
           Address(Z_tmp_2, Z_ARG3, arrayOopDesc::base_offset_in_bytes(T_BYTE)));
}

void TemplateTable::castore() {
  transition(itos, vtos);

  __ pop_i(Z_ARG3);
  __ pop_ptr(Z_tmp_2);
  // Z_tos   : value
  // Z_ARG3  : index
  // Z_tmp_2 : array
  Register index = Z_ARG3; // prefer index in Z_ARG3
  index_check(Z_tmp_2, index, LogBytesPerShort);
  __ z_sth(Z_tos,
           Address(Z_tmp_2, index, arrayOopDesc::base_offset_in_bytes(T_CHAR)));
}

void TemplateTable::sastore() {
  castore();
}

void TemplateTable::istore(int n) {
  transition(itos, vtos);
  __ reg2mem_opt(Z_tos, iaddress(n), false);
}

void TemplateTable::lstore(int n) {
  transition(ltos, vtos);
  __ reg2mem_opt(Z_tos, laddress(n));
}

void TemplateTable::fstore(int n) {
  transition(ftos, vtos);
  __ freg2mem_opt(Z_ftos, faddress(n), false);
}

void TemplateTable::dstore(int n) {
  transition(dtos, vtos);
  __ freg2mem_opt(Z_ftos, daddress(n));
}

void TemplateTable::astore(int n) {
  transition(vtos, vtos);
  __ pop_ptr(Z_tos);
  __ reg2mem_opt(Z_tos, aaddress(n));
}

void TemplateTable::pop() {
  transition(vtos, vtos);
  __ add2reg(Z_esp, Interpreter::stackElementSize);
}

void TemplateTable::pop2() {
  transition(vtos, vtos);
  __ add2reg(Z_esp, 2 * Interpreter::stackElementSize);
}

void TemplateTable::dup() {
  transition(vtos, vtos);
  __ load_ptr(0, Z_tos);
  __ push_ptr(Z_tos);
  // stack: ..., a, a
}

void TemplateTable::dup_x1() {
  transition(vtos, vtos);

  // stack: ..., a, b
  __ load_ptr(0, Z_tos);          // load b
  __ load_ptr(1, Z_R0_scratch);   // load a
  __ store_ptr(1, Z_tos);         // store b
  __ store_ptr(0, Z_R0_scratch);  // store a
  __ push_ptr(Z_tos);             // push b
  // stack: ..., b, a, b
}

void TemplateTable::dup_x2() {
  transition(vtos, vtos);

  // stack: ..., a, b, c
  __ load_ptr(0, Z_R0_scratch);   // load c
  __ load_ptr(2, Z_R1_scratch);   // load a
  __ store_ptr(2, Z_R0_scratch);  // store c in a
  __ push_ptr(Z_R0_scratch);      // push c
  // stack: ..., c, b, c, c
  __ load_ptr(2, Z_R0_scratch);   // load b
  __ store_ptr(2, Z_R1_scratch);  // store a in b
  // stack: ..., c, a, c, c
  __ store_ptr(1, Z_R0_scratch);  // store b in c
  // stack: ..., c, a, b, c
}

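// The dup/swap templates above and below manipulate raw expression stack
// slots via load_ptr/store_ptr, without interpreting their contents. This
// matches the bytecode semantics: these instructions are defined in terms
// of category-1 stack slots (or slot pairs for the dup2 forms), so copying
// slots wholesale is sufficient.
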
void TemplateTable::dup2() {
  transition(vtos, vtos);

  // stack: ..., a, b
  __ load_ptr(1, Z_R0_scratch);  // load a
  __ push_ptr(Z_R0_scratch);     // push a
  __ load_ptr(1, Z_R0_scratch);  // load b
  __ push_ptr(Z_R0_scratch);     // push b
  // stack: ..., a, b, a, b
}

void TemplateTable::dup2_x1() {
  transition(vtos, vtos);

  // stack: ..., a, b, c
  __ load_ptr(0, Z_R0_scratch);   // load c
  __ load_ptr(1, Z_R1_scratch);   // load b
  __ push_ptr(Z_R1_scratch);      // push b
  __ push_ptr(Z_R0_scratch);      // push c
  // stack: ..., a, b, c, b, c
  __ store_ptr(3, Z_R0_scratch);  // store c in b
  // stack: ..., a, c, c, b, c
  __ load_ptr(4, Z_R0_scratch);   // load a
  __ store_ptr(2, Z_R0_scratch);  // store a in 2nd c
  // stack: ..., a, c, a, b, c
  __ store_ptr(4, Z_R1_scratch);  // store b in a
  // stack: ..., b, c, a, b, c
}

void TemplateTable::dup2_x2() {
  transition(vtos, vtos);

  // stack: ..., a, b, c, d
  __ load_ptr(0, Z_R0_scratch);   // load d
  __ load_ptr(1, Z_R1_scratch);   // load c
  __ push_ptr(Z_R1_scratch);      // push c
  __ push_ptr(Z_R0_scratch);      // push d
  // stack: ..., a, b, c, d, c, d
  __ load_ptr(4, Z_R1_scratch);   // load b
  __ store_ptr(2, Z_R1_scratch);  // store b in d
  __ store_ptr(4, Z_R0_scratch);  // store d in b
  // stack: ..., a, d, c, b, c, d
  __ load_ptr(5, Z_R0_scratch);   // load a
  __ load_ptr(3, Z_R1_scratch);   // load c
  __ store_ptr(3, Z_R0_scratch);  // store a in c
  __ store_ptr(5, Z_R1_scratch);  // store c in a
  // stack: ..., c, d, a, b, c, d
}

void TemplateTable::swap() {
  transition(vtos, vtos);

  // stack: ..., a, b
  __ load_ptr(1, Z_R0_scratch);   // load a
  __ load_ptr(0, Z_R1_scratch);   // load b
  __ store_ptr(0, Z_R0_scratch);  // store a in b
  __ store_ptr(1, Z_R1_scratch);  // store b in a
  // stack: ..., b, a
}

void TemplateTable::iop2(Operation op) {
  transition(itos, itos);
  switch (op) {
    case add  : __ z_ay(Z_tos,  __ stackTop()); __ pop_i(); break;
    case sub  : __ z_sy(Z_tos,  __ stackTop()); __ pop_i(); __ z_lcr(Z_tos, Z_tos); break;
    case mul  : __ z_msy(Z_tos, __ stackTop()); __ pop_i(); break;
    case _and : __ z_ny(Z_tos,  __ stackTop()); __ pop_i(); break;
    case _or  : __ z_oy(Z_tos,  __ stackTop()); __ pop_i(); break;
    case _xor : __ z_xy(Z_tos,  __ stackTop()); __ pop_i(); break;
    case shl  : __ z_lr(Z_tmp_1, Z_tos);
                __ z_nill(Z_tmp_1, 31);  // Lowest 5 bits are shiftamount.
                __ pop_i(Z_tos); __ z_sll(Z_tos, 0, Z_tmp_1); break;
    case shr  : __ z_lr(Z_tmp_1, Z_tos);
                __ z_nill(Z_tmp_1, 31);  // Lowest 5 bits are shiftamount.
                __ pop_i(Z_tos); __ z_sra(Z_tos, 0, Z_tmp_1); break;
    case ushr : __ z_lr(Z_tmp_1, Z_tos);
                __ z_nill(Z_tmp_1, 31);  // Lowest 5 bits are shiftamount.
                __ pop_i(Z_tos); __ z_srl(Z_tos, 0, Z_tmp_1); break;
    default   : ShouldNotReachHere(); break;
  }
  return;
}

void TemplateTable::lop2(Operation op) {
  transition(ltos, ltos);

  switch (op) {
    case add  : __ z_ag(Z_tos,  __ stackTop()); __ pop_l(); break;
    case sub  : __ z_sg(Z_tos,  __ stackTop()); __ pop_l(); __ z_lcgr(Z_tos, Z_tos); break;
    case mul  : __ z_msg(Z_tos, __ stackTop()); __ pop_l(); break;
    case _and : __ z_ng(Z_tos,  __ stackTop()); __ pop_l(); break;
    case _or  : __ z_og(Z_tos,  __ stackTop()); __ pop_l(); break;
    case _xor : __ z_xg(Z_tos,  __ stackTop()); __ pop_l(); break;
    default   : ShouldNotReachHere(); break;
  }
  return;
}

// Common part of idiv/irem.
static void idiv_helper(InterpreterMacroAssembler * _masm, address exception) {
  NearLabel not_null;

  // Use register pair Z_tmp_1, Z_tmp_2 for DIVIDE SINGLE.
  assert(Z_tmp_1->successor() == Z_tmp_2, " need even/odd register pair for idiv/irem");

  // Get dividend.
  __ pop_i(Z_tmp_2);

  // If divisor == 0 throw exception.
  __ compare32_and_branch(Z_tos, (intptr_t) 0,
                          Assembler::bcondNotEqual, not_null);
  __ load_absolute_address(Z_R1_scratch, exception);
  __ z_br(Z_R1_scratch);

  __ bind(not_null);

  __ z_lgfr(Z_tmp_2, Z_tmp_2); // Sign extend dividend.
  __ z_dsgfr(Z_tmp_1, Z_tos);  // Do it.
}

void TemplateTable::idiv() {
  transition(itos, itos);

  idiv_helper(_masm, Interpreter::_throw_ArithmeticException_entry);
  __ z_llgfr(Z_tos, Z_tmp_2);  // Result is in Z_tmp_2.
}

void TemplateTable::irem() {
  transition(itos, itos);

  idiv_helper(_masm, Interpreter::_throw_ArithmeticException_entry);
  __ z_llgfr(Z_tos, Z_tmp_1);  // Result is in Z_tmp_1.
}

void TemplateTable::lmul() {
  transition(ltos, ltos);

  // Multiply with memory operand.
  __ z_msg(Z_tos, __ stackTop());
  __ pop_l();  // Pop operand.
}

// Common part of ldiv/lrem.
//
// Input:
//   Z_tos := the divisor (dividend still on stack)
//
// Updated registers:
//   Z_tmp_1 := pop_l() % Z_tos     ; if is_ldiv == false
//   Z_tmp_2 := pop_l() / Z_tos     ; if is_ldiv == true
//
static void ldiv_helper(InterpreterMacroAssembler * _masm, address exception, bool is_ldiv) {
  NearLabel not_null, done;

  // Use register pair Z_tmp_1, Z_tmp_2 for DIVIDE SINGLE.
  assert(Z_tmp_1->successor() == Z_tmp_2,
         " need even/odd register pair for idiv/irem");

  // Get dividend.
  __ pop_l(Z_tmp_2);

  // If divisor == 0 throw exception.
  __ compare64_and_branch(Z_tos, (intptr_t)0, Assembler::bcondNotEqual, not_null);
  __ load_absolute_address(Z_R1_scratch, exception);
  __ z_br(Z_R1_scratch);

  __ bind(not_null);
  // Special case for dividend == min_jlong (0x8000000000000000) and divisor == -1.
  if (is_ldiv) {
    // result := Z_tmp_2 := - dividend
    __ z_lcgr(Z_tmp_2, Z_tmp_2);
  } else {
    // result remainder := Z_tmp_1 := 0
    __ clear_reg(Z_tmp_1, true, false);  // Don't set CC.
  }

  // if divisor == -1 goto done
  __ compare64_and_branch(Z_tos, -1, Assembler::bcondEqual, done);
  if (is_ldiv)
    // Restore sign, because divisor != -1.
    __ z_lcgr(Z_tmp_2, Z_tmp_2);
  __ z_dsgr(Z_tmp_1, Z_tos);   // Do it.
  __ bind(done);
}
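
// Why ldiv_helper needs the special case above (sketch): DSGR would raise
// a fixed-point-divide exception for min_jlong / -1, whose true quotient
// (+2^63) is not representable. For divisor == -1 the result is therefore
// computed without DSGR: quotient := -dividend (LCGR leaves min_jlong
// unchanged, which is exactly the Java-mandated wrap-around) and
// remainder := 0.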

void TemplateTable::ldiv() {
  transition(ltos, ltos);

  ldiv_helper(_masm, Interpreter::_throw_ArithmeticException_entry, true /*is_ldiv*/);
  __ z_lgr(Z_tos, Z_tmp_2);  // Result is in Z_tmp_2.
}

void TemplateTable::lrem() {
  transition(ltos, ltos);

  ldiv_helper(_masm, Interpreter::_throw_ArithmeticException_entry, false /*is_ldiv*/);
  __ z_lgr(Z_tos, Z_tmp_1);  // Result is in Z_tmp_1.
}

void TemplateTable::lshl() {
  transition(itos, ltos);

  // Z_tos: shift amount
  __ pop_l(Z_tmp_1);  // Get shift value.
  __ z_sllg(Z_tos, Z_tmp_1, 0, Z_tos);
}

void TemplateTable::lshr() {
  transition(itos, ltos);

  // Z_tos: shift amount
  __ pop_l(Z_tmp_1);  // Get shift value.
  __ z_srag(Z_tos, Z_tmp_1, 0, Z_tos);
}

void TemplateTable::lushr() {
  transition(itos, ltos);

  // Z_tos: shift amount
  __ pop_l(Z_tmp_1);  // Get shift value.
  __ z_srlg(Z_tos, Z_tmp_1, 0, Z_tos);
}

void TemplateTable::fop2(Operation op) {
  transition(ftos, ftos);

  switch (op) {
    case add:
      // Add memory operand.
      __ z_aeb(Z_ftos, __ stackTop()); __ pop_f(); return;
    case sub:
      // Sub memory operand.
      __ z_ler(Z_F1, Z_ftos);    // first operand
      __ pop_f(Z_ftos);          // second operand from stack
      __ z_sebr(Z_ftos, Z_F1);
      return;
    case mul:
      // Multiply with memory operand.
      __ z_meeb(Z_ftos, __ stackTop()); __ pop_f(); return;
    case div:
      __ z_ler(Z_F1, Z_ftos);    // first operand
      __ pop_f(Z_ftos);          // second operand from stack
      __ z_debr(Z_ftos, Z_F1);
      return;
    case rem:
      // Do runtime call.
      __ z_ler(Z_FARG2, Z_ftos); // divisor
      __ pop_f(Z_FARG1);         // dividend
      __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::frem));
      // Result should be in the right place (Z_ftos == Z_FRET).
      return;
    default:
      ShouldNotReachHere();
      return;
  }
}

void TemplateTable::dop2(Operation op) {
  transition(dtos, dtos);

  switch (op) {
    case add:
      // Add memory operand.
      __ z_adb(Z_ftos, __ stackTop()); __ pop_d(); return;
    case sub:
      // Sub memory operand.
      __ z_ldr(Z_F1, Z_ftos);    // first operand
      __ pop_d(Z_ftos);          // second operand from stack
      __ z_sdbr(Z_ftos, Z_F1);
      return;
    case mul:
      // Multiply with memory operand.
      __ z_mdb(Z_ftos, __ stackTop()); __ pop_d(); return;
    case div:
      __ z_ldr(Z_F1, Z_ftos);    // first operand
      __ pop_d(Z_ftos);          // second operand from stack
      __ z_ddbr(Z_ftos, Z_F1);
      return;
    case rem:
      // Do runtime call.
      __ z_ldr(Z_FARG2, Z_ftos); // divisor
      __ pop_d(Z_FARG1);         // dividend
      __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::drem));
      // Result should be in the right place (Z_ftos == Z_FRET).
      return;
    default:
      ShouldNotReachHere();
      return;
  }
}

void TemplateTable::ineg() {
  transition(itos, itos);
  __ z_lcr(Z_tos);
}

void TemplateTable::lneg() {
  transition(ltos, ltos);
  __ z_lcgr(Z_tos);
}

void TemplateTable::fneg() {
  transition(ftos, ftos);
  __ z_lcebr(Z_ftos, Z_ftos);
}

void TemplateTable::dneg() {
  transition(dtos, dtos);
  __ z_lcdbr(Z_ftos, Z_ftos);
}

void TemplateTable::iinc() {
  transition(vtos, vtos);

  Address local;
  __ z_lb(Z_R0_scratch, at_bcp(2)); // Get constant.
  locals_index(Z_R1_scratch);
  local = iaddress(_masm, Z_R1_scratch);
  __ z_a(Z_R0_scratch, local);
  __ reg2mem_opt(Z_R0_scratch, local, false);
}

void TemplateTable::wide_iinc() {
  transition(vtos, vtos);

  // Z_tmp_1 := increment
  __ get_2_byte_integer_at_bcp(Z_tmp_1, 4, InterpreterMacroAssembler::Signed);
  // Z_tmp_2 := index of local to increment
  locals_index_wide(Z_tmp_2);
  // Load, increment, and store.
  __ access_local_int(Z_tmp_2, Z_tos);
  __ z_agr(Z_tos, Z_tmp_1);
  // Shifted index is still in Z_tmp_2.
  __ reg2mem_opt(Z_tos, Address(Z_locals, Z_tmp_2), false);
}


void TemplateTable::convert() {
  // Checking
#ifdef ASSERT
  TosState tos_in  = ilgl;
  TosState tos_out = ilgl;

  switch (bytecode()) {
    case Bytecodes::_i2l:
    case Bytecodes::_i2f:
    case Bytecodes::_i2d:
    case Bytecodes::_i2b:
    case Bytecodes::_i2c:
    case Bytecodes::_i2s:
      tos_in = itos;
      break;
    case Bytecodes::_l2i:
    case Bytecodes::_l2f:
    case Bytecodes::_l2d:
      tos_in = ltos;
      break;
    case Bytecodes::_f2i:
    case Bytecodes::_f2l:
    case Bytecodes::_f2d:
      tos_in = ftos;
      break;
    case Bytecodes::_d2i:
    case Bytecodes::_d2l:
    case Bytecodes::_d2f:
      tos_in = dtos;
      break;
    default :
      ShouldNotReachHere();
  }
  switch (bytecode()) {
    case Bytecodes::_l2i:
    case Bytecodes::_f2i:
    case Bytecodes::_d2i:
    case Bytecodes::_i2b:
    case Bytecodes::_i2c:
    case Bytecodes::_i2s:
      tos_out = itos;
      break;
    case Bytecodes::_i2l:
    case Bytecodes::_f2l:
    case Bytecodes::_d2l:
      tos_out = ltos;
      break;
    case Bytecodes::_i2f:
    case Bytecodes::_l2f:
    case Bytecodes::_d2f:
      tos_out = ftos;
      break;
    case Bytecodes::_i2d:
    case Bytecodes::_l2d:
    case Bytecodes::_f2d:
      tos_out = dtos;
      break;
    default :
      ShouldNotReachHere();
  }

  transition(tos_in, tos_out);
#endif // ASSERT

  // Conversion
  Label done;
  switch (bytecode()) {
    case Bytecodes::_i2l:
      __ z_lgfr(Z_tos, Z_tos);
      return;
    case Bytecodes::_i2f:
      __ z_cefbr(Z_ftos, Z_tos);
      return;
    case Bytecodes::_i2d:
      __ z_cdfbr(Z_ftos, Z_tos);
      return;
    case Bytecodes::_i2b:
      // Sign extend least significant byte.
      __ move_reg_if_needed(Z_tos, T_BYTE, Z_tos, T_INT);
      return;
    case Bytecodes::_i2c:
      // Zero extend 2 least significant bytes.
      __ move_reg_if_needed(Z_tos, T_CHAR, Z_tos, T_INT);
      return;
    case Bytecodes::_i2s:
      // Sign extend 2 least significant bytes.
      __ move_reg_if_needed(Z_tos, T_SHORT, Z_tos, T_INT);
      return;
    case Bytecodes::_l2i:
      // Sign-extend not needed here, upper 4 bytes of int value in register are ignored.
      return;
    case Bytecodes::_l2f:
      __ z_cegbr(Z_ftos, Z_tos);
      return;
    case Bytecodes::_l2d:
      __ z_cdgbr(Z_ftos, Z_tos);
      return;
    case Bytecodes::_f2i:
    case Bytecodes::_f2l:
      __ clear_reg(Z_tos, true, false);  // Don't set CC.
      __ z_cebr(Z_ftos, Z_ftos);
      __ z_brno(done); // NaN -> 0
      if (bytecode() == Bytecodes::_f2i)
        __ z_cfebr(Z_tos, Z_ftos, Assembler::to_zero);
      else // bytecode() == Bytecodes::_f2l
        __ z_cgebr(Z_tos, Z_ftos, Assembler::to_zero);
      break;
    case Bytecodes::_f2d:
      __ move_freg_if_needed(Z_ftos, T_DOUBLE, Z_ftos, T_FLOAT);
      return;
    case Bytecodes::_d2i:
    case Bytecodes::_d2l:
      __ clear_reg(Z_tos, true, false);  // Don't set CC.
      __ z_cdbr(Z_ftos, Z_ftos);
      __ z_brno(done); // NaN -> 0
      if (bytecode() == Bytecodes::_d2i)
        __ z_cfdbr(Z_tos, Z_ftos, Assembler::to_zero);
      else // Bytecodes::_d2l
        __ z_cgdbr(Z_tos, Z_ftos, Assembler::to_zero);
      break;
    case Bytecodes::_d2f:
      __ move_freg_if_needed(Z_ftos, T_FLOAT, Z_ftos, T_DOUBLE);
      return;
    default:
      ShouldNotReachHere();
  }
  __ bind(done);
}

void TemplateTable::lcmp() {
  transition(ltos, itos);

  Label done;
  Register val1 = Z_R0_scratch;
  Register val2 = Z_R1_scratch;

  if (VM_Version::has_LoadStoreConditional()) {
    __ pop_l(val1);           // pop value 1.
    __ z_lghi(val2, -1);      // lt value
    __ z_cgr(val1, Z_tos);    // Compare with Z_tos (value 2). Protect CC under all circumstances.
    __ z_lghi(val1, 1);       // gt value
    __ z_lghi(Z_tos, 0);      // eq value

    __ z_locgr(Z_tos, val1, Assembler::bcondHigh);
    __ z_locgr(Z_tos, val2, Assembler::bcondLow);
  } else {
    __ pop_l(val1);           // Pop value 1.
    __ z_cgr(val1, Z_tos);    // Compare with Z_tos (value 2). Protect CC under all circumstances.

    __ z_lghi(Z_tos, 0);      // eq value
    __ z_bre(done);

    __ z_lghi(Z_tos, 1);      // gt value
    __ z_brh(done);

    __ z_lghi(Z_tos, -1);     // lt value
  }

  __ bind(done);
}


void TemplateTable::float_cmp(bool is_float, int unordered_result) {
  Label done;

  if (is_float) {
    __ pop_f(Z_FARG2);
    __ z_cebr(Z_FARG2, Z_ftos);
  } else {
    __ pop_d(Z_FARG2);
    __ z_cdbr(Z_FARG2, Z_ftos);
  }

  if (VM_Version::has_LoadStoreConditional()) {
    Register one       = Z_R0_scratch;
    Register minus_one = Z_R1_scratch;
    __ z_lghi(minus_one, -1);
    __ z_lghi(one, 1);
    __ z_lghi(Z_tos, 0);
    __ z_locgr(Z_tos, one, unordered_result == 1 ? Assembler::bcondHighOrNotOrdered : Assembler::bcondHigh);
    __ z_locgr(Z_tos, minus_one, unordered_result == 1 ? Assembler::bcondLow : Assembler::bcondLowOrNotOrdered);
  } else {
    // Z_FARG2 == Z_ftos
    __ clear_reg(Z_tos, false, false);
    __ z_bre(done);

    // Z_FARG2 > Z_ftos, or unordered
    __ z_lhi(Z_tos, 1);
    __ z_brc(unordered_result == 1 ? Assembler::bcondHighOrNotOrdered : Assembler::bcondHigh, done);

    // Z_FARG2 < Z_ftos, or unordered
    __ z_lhi(Z_tos, -1);

    __ bind(done);
  }
}

void TemplateTable::branch(bool is_jsr, bool is_wide) {
  const Register bumped_count = Z_tmp_1;
  const Register method       = Z_tmp_2;
  const Register m_counters   = Z_R1_scratch;
  const Register mdo          = Z_tos;

  BLOCK_COMMENT("TemplateTable::branch {");
  __ get_method(method);
  __ profile_taken_branch(mdo, bumped_count);

  const ByteSize ctr_offset = InvocationCounter::counter_offset();
  const ByteSize be_offset  = MethodCounters::backedge_counter_offset()   + ctr_offset;
  const ByteSize inv_offset = MethodCounters::invocation_counter_offset() + ctr_offset;

  // Get (wide) offset to disp.
  const Register disp = Z_ARG5;
  if (is_wide) {
    __ get_4_byte_integer_at_bcp(disp, 1);
  } else {
    __ get_2_byte_integer_at_bcp(disp, 1, InterpreterMacroAssembler::Signed);
  }

  // Handle all the JSR stuff here, then exit.
  // It's much shorter and cleaner than intermingling with the
  // non-JSR normal-branch stuff occurring below.
  if (is_jsr) {
    // Compute return address as bci in Z_tos.
    __ z_lgr(Z_R1_scratch, Z_bcp);
    __ z_sg(Z_R1_scratch, Address(method, Method::const_offset()));
    __ add2reg(Z_tos, (is_wide ? 5 : 3) - in_bytes(ConstMethod::codes_offset()), Z_R1_scratch);

    // Bump bcp to target of JSR.
    __ z_agr(Z_bcp, disp);
    // Push return address for "ret" on stack.
    __ push_ptr(Z_tos);
    // And away we go!
    __ dispatch_next(vtos);
    return;
  }

  // Normal (non-jsr) branch handling.

  // Bump bytecode pointer by displacement (take the branch).
  __ z_agr(Z_bcp, disp);

  assert(UseLoopCounter || !UseOnStackReplacement,
         "on-stack-replacement requires loop counters");

  NearLabel backedge_counter_overflow;
  NearLabel profile_method;
  NearLabel dispatch;
  int       increment = InvocationCounter::count_increment;

  if (UseLoopCounter) {
    // Increment backedge counter for backward branches.
    // disp:     target offset
    // Z_bcp:    target bcp
    // Z_locals: locals pointer
    //
    // Count only if backward branch.
    __ compare32_and_branch(disp, (intptr_t)0, Assembler::bcondHigh, dispatch);

    if (TieredCompilation) {
      Label noCounters;

      if (ProfileInterpreter) {
        NearLabel no_mdo;

        // Are we profiling?
        __ load_and_test_long(mdo, Address(method, Method::method_data_offset()));
        __ branch_optimized(Assembler::bcondZero, no_mdo);

        // Increment the MDO backedge counter.
        const Address mdo_backedge_counter(mdo, MethodData::backedge_counter_offset() + InvocationCounter::counter_offset());

        const Address mask(mdo, MethodData::backedge_mask_offset());
        __ increment_mask_and_jump(mdo_backedge_counter, increment, mask,
                                   Z_ARG2, false, Assembler::bcondZero,
                                   UseOnStackReplacement ? &backedge_counter_overflow : NULL);
        __ z_bru(dispatch);
        __ bind(no_mdo);
      }

      // Increment backedge counter in MethodCounters*.
      __ get_method_counters(method, m_counters, noCounters);
      const Address mask(m_counters, MethodCounters::backedge_mask_offset());
      __ increment_mask_and_jump(Address(m_counters, be_offset),
                                 increment, mask,
                                 Z_ARG2, false, Assembler::bcondZero,
                                 UseOnStackReplacement ? &backedge_counter_overflow : NULL);
                                 &backedge_counter_overflow : NULL);
      __ bind(noCounters);
    } else {
      Register counter = Z_tos;
      Label    noCounters;
      // Get address of MethodCounters object.
      __ get_method_counters(method, m_counters, noCounters);
      // Increment backedge counter.
      __ increment_backedge_counter(m_counters, counter);

      if (ProfileInterpreter) {
        // Test to see if we should create a method data obj.
        __ z_cl(counter, Address(m_counters, MethodCounters::interpreter_profile_limit_offset()));
        __ z_brl(dispatch);

        // If no method data exists, go to profile method.
        __ test_method_data_pointer(Z_ARG4/*result unused*/, profile_method);

        if (UseOnStackReplacement) {
          // Check for overflow against 'bumped_count', which is the MDO taken count.
          __ z_cl(bumped_count, Address(m_counters, MethodCounters::interpreter_backward_branch_limit_offset()));
          __ z_brl(dispatch);

          // When ProfileInterpreter is on, the backedge_count comes
          // from the methodDataOop, whose value does not get reset on
          // the call to frequency_counter_overflow(). To avoid
          // excessive calls to the overflow routine while the method is
          // being compiled, add a second test to make sure the overflow
          // function is called only once every overflow_frequency.
          const int overflow_frequency = 1024;
          __ and_imm(bumped_count, overflow_frequency - 1);
          __ z_brz(backedge_counter_overflow);

        }
      } else {
        if (UseOnStackReplacement) {
          // Check for overflow against 'counter', which is the sum of the
          // counters.
          __ z_cl(counter, Address(m_counters, MethodCounters::interpreter_backward_branch_limit_offset()));
          __ z_brh(backedge_counter_overflow);
        }
      }
      __ bind(noCounters);
    }

    __ bind(dispatch);
  }

  // Pre-load the next target bytecode into Z_bytecode.
  __ z_llgc(Z_bytecode, Address(Z_bcp, (intptr_t) 0));

  // Continue with the bytecode @ target.
  // Z_tos:      Return bci for jsr's, unused otherwise.
  // Z_bytecode: target bytecode
  // Z_bcp:      target bcp
  __ dispatch_only(vtos);

  // Out-of-line code runtime calls.
  if (UseLoopCounter) {
    if (ProfileInterpreter) {
      // Out-of-line code to allocate method data oop.
      __ bind(profile_method);

      __ call_VM(noreg,
                 CAST_FROM_FN_PTR(address, InterpreterRuntime::profile_method));
      __ z_llgc(Z_bytecode, Address(Z_bcp, (intptr_t) 0));  // Restore target bytecode.
      __ set_method_data_pointer_for_bcp();
      __ z_bru(dispatch);
    }

    if (UseOnStackReplacement) {

      // Backedge counter overflow.
      __ bind(backedge_counter_overflow);

      __ z_lcgr(Z_ARG2, disp);  // Z_ARG2 := -disp
      __ z_agr(Z_ARG2, Z_bcp);  // Z_ARG2 := branch target bcp - disp == branch bcp
      __ call_VM(noreg,
                 CAST_FROM_FN_PTR(address, InterpreterRuntime::frequency_counter_overflow),
                 Z_ARG2);

      // Z_RET: osr nmethod (osr ok) or NULL (osr not possible).
      __ compare64_and_branch(Z_RET, (intptr_t) 0, Assembler::bcondEqual, dispatch);

      // Nmethod may have been invalidated (VM may block upon call_VM return).
      __ z_cliy(nmethod::state_offset(), Z_RET, nmethod::in_use);
      __ z_brne(dispatch);

      // Migrate the interpreter frame off of the stack.

      __ z_lgr(Z_tmp_1, Z_RET);  // Save the nmethod.
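      // OSR hand-off sketch (assumed contract; SharedRuntime::OSR_migration_begin
      // is the authoritative part):
      //   buf = OSR_migration_begin();     // Pack locals/monitors into a C heap buffer.
      //   pop_interpreter_frame();         // The interpreter frame is dead now.
      //   goto osr_nmethod->osr_entry();   // Compiled entry consumes 'buf' in Z_ARG1.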

      call_VM(noreg,
              CAST_FROM_FN_PTR(address, SharedRuntime::OSR_migration_begin));

      // Z_RET is OSR buffer, move it to expected parameter location.
      __ lgr_if_needed(Z_ARG1, Z_RET);

      // Pop the interpreter frame ...
      __ pop_interpreter_frame(Z_R14, Z_ARG2/*tmp1*/, Z_ARG3/*tmp2*/);

      // ... and begin the OSR nmethod.
      __ z_lg(Z_R1_scratch, Address(Z_tmp_1, nmethod::osr_entry_point_offset()));
      __ z_br(Z_R1_scratch);
    }
  }
  BLOCK_COMMENT("} TemplateTable::branch");
}

void TemplateTable::if_0cmp(Condition cc) {
  transition(itos, vtos);

  // Assume branch is more often taken than not (loops use backward branches).
  NearLabel not_taken;
  __ compare32_and_branch(Z_tos, (intptr_t) 0, j_not(cc), not_taken);
  branch(false, false);
  __ bind(not_taken);
  __ profile_not_taken_branch(Z_tos);
}

void TemplateTable::if_icmp(Condition cc) {
  transition(itos, vtos);

  // Assume branch is more often taken than not (loops use backward branches).
  NearLabel not_taken;
  __ pop_i(Z_R0_scratch);
  __ compare32_and_branch(Z_R0_scratch, Z_tos, j_not(cc), not_taken);
  branch(false, false);
  __ bind(not_taken);
  __ profile_not_taken_branch(Z_tos);
}

void TemplateTable::if_nullcmp(Condition cc) {
  transition(atos, vtos);

  // Assume branch is more often taken than not (loops use backward branches).
  NearLabel not_taken;
  __ compare64_and_branch(Z_tos, (intptr_t) 0, j_not(cc), not_taken);
  branch(false, false);
  __ bind(not_taken);
  __ profile_not_taken_branch(Z_tos);
}

void TemplateTable::if_acmp(Condition cc) {
  transition(atos, vtos);
  // Assume branch is more often taken than not (loops use backward branches).
  NearLabel not_taken;
  __ pop_ptr(Z_ARG2);
  __ verify_oop(Z_ARG2);
  __ verify_oop(Z_tos);
  __ compareU64_and_branch(Z_tos, Z_ARG2, j_not(cc), not_taken);
  branch(false, false);
  __ bind(not_taken);
  __ profile_not_taken_branch(Z_ARG3);
}

void TemplateTable::ret() {
  transition(vtos, vtos);

  locals_index(Z_tmp_1);
  // Get return bci, compute return bcp. Must load 64 bits.
  __ mem2reg_opt(Z_tmp_1, iaddress(_masm, Z_tmp_1));
  __ profile_ret(Z_tmp_1, Z_tmp_2);
  __ get_method(Z_tos);
  __ mem2reg_opt(Z_R1_scratch, Address(Z_tos, Method::const_offset()));
  __ load_address(Z_bcp, Address(Z_R1_scratch, Z_tmp_1, ConstMethod::codes_offset()));
  __ dispatch_next(vtos);
}

void TemplateTable::wide_ret() {
  transition(vtos, vtos);

  locals_index_wide(Z_tmp_1);
  // Get return bci, compute return bcp.
  __ mem2reg_opt(Z_tmp_1, aaddress(_masm, Z_tmp_1));
  __ profile_ret(Z_tmp_1, Z_tmp_2);
  __ get_method(Z_tos);
  __ mem2reg_opt(Z_R1_scratch, Address(Z_tos, Method::const_offset()));
  __ load_address(Z_bcp, Address(Z_R1_scratch, Z_tmp_1, ConstMethod::codes_offset()));
  __ dispatch_next(vtos);
}

void TemplateTable::tableswitch () {
  transition(itos, vtos);

  NearLabel default_case, continue_execution;
  Register bcp = Z_ARG5;
  // Align bcp.
  __ load_address(bcp, at_bcp(BytesPerInt));
  __ z_nill(bcp, (-BytesPerInt) & 0xffff);

  // Load lo & hi.
  Register low  = Z_tmp_1;
  Register high = Z_tmp_2;

  // Load low into 64 bits, since used for address calculation.
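  // For reference, the tableswitch operand layout after the alignment above
  // is (JVMS, tableswitch):
  //   int32 default_offset;  int32 low;  int32 high;
  //   int32 jump_offsets[high - low + 1];
  // so 'low'/'high' sit at bcp+4 and bcp+8, and entry i lives at bcp+12+4*i.
  // The loads below rely on exactly this layout.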
  __ mem2reg_signed_opt(low, Address(bcp, BytesPerInt));
  __ mem2reg_opt(high, Address(bcp, 2 * BytesPerInt), false);
  // Sign extend "label" value for address calculation.
  __ z_lgfr(Z_tos, Z_tos);

  // Check against lo & hi.
  __ compare32_and_branch(Z_tos, low, Assembler::bcondLow, default_case);
  __ compare32_and_branch(Z_tos, high, Assembler::bcondHigh, default_case);

  // Lookup dispatch offset.
  __ z_sgr(Z_tos, low);
  Register jump_table_offset = Z_ARG3;
  // Index2offset; index in Z_tos is killed by profile_switch_case.
  __ z_sllg(jump_table_offset, Z_tos, LogBytesPerInt);
  __ profile_switch_case(Z_tos, Z_ARG4 /*tmp for mdp*/, low/*tmp*/, Z_bytecode/*tmp*/);

  Register index = Z_tmp_2;

  // Load index sign extended for addressing.
  __ mem2reg_signed_opt(index, Address(bcp, jump_table_offset, 3 * BytesPerInt));

  // Continue execution.
  __ bind(continue_execution);

  // Load next bytecode.
  __ z_llgc(Z_bytecode, Address(Z_bcp, index));
  __ z_agr(Z_bcp, index);  // Advance bcp.
  __ dispatch_only(vtos);

  // Handle default.
  __ bind(default_case);

  __ profile_switch_default(Z_tos);
  __ mem2reg_signed_opt(index, Address(bcp));
  __ z_bru(continue_execution);
}

void TemplateTable::lookupswitch () {
  transition(itos, itos);
  __ stop("lookupswitch bytecode should have been rewritten");
}

void TemplateTable::fast_linearswitch () {
  transition(itos, vtos);

  Label loop_entry, loop, found, continue_execution;
  Register bcp = Z_ARG5;

  // Align bcp.
  __ load_address(bcp, at_bcp(BytesPerInt));
  __ z_nill(bcp, (-BytesPerInt) & 0xffff);

  // Start search with last case.
  Register current_case_offset = Z_tmp_1;

  __ mem2reg_signed_opt(current_case_offset, Address(bcp, BytesPerInt));
  __ z_sllg(current_case_offset, current_case_offset, LogBytesPerWord);  // index2bytes
  __ z_bru(loop_entry);

  // table search
  __ bind(loop);

  __ z_c(Z_tos, Address(bcp, current_case_offset, 2 * BytesPerInt));
  __ z_bre(found);

  __ bind(loop_entry);
  __ z_aghi(current_case_offset, -2 * BytesPerInt);  // Decrement.
  __ z_brnl(loop);

  // default case
  Register offset = Z_tmp_2;

  __ profile_switch_default(Z_tos);
  // Load offset sign extended for addressing.
  __ mem2reg_signed_opt(offset, Address(bcp));
  __ z_bru(continue_execution);

  // Entry found -> get offset.
  __ bind(found);
  __ mem2reg_signed_opt(offset, Address(bcp, current_case_offset, 3 * BytesPerInt));
  // Profile that this case was taken.
  Register current_case_idx = Z_ARG4;
  __ z_srlg(current_case_idx, current_case_offset, LogBytesPerWord);  // bytes2index
  __ profile_switch_case(current_case_idx, Z_tos, bcp, Z_bytecode);

  // Continue execution.
  __ bind(continue_execution);

  // Load next bytecode.
  __ z_llgc(Z_bytecode, Address(Z_bcp, offset, 0));
  __ z_agr(Z_bcp, offset);  // Advance bcp.
  __ dispatch_only(vtos);
}


void TemplateTable::fast_binaryswitch() {

  transition(itos, vtos);

  // Implementation using the following core algorithm:
  //
  // int binary_search(int key, LookupswitchPair* array, int n) {
  //   // Binary search according to "Methodik des Programmierens" by
  //   // Edsger W. Dijkstra and W.H.J.
  //   // Feijen, Addison Wesley Germany 1985.
  //   int i = 0;
  //   int j = n;
  //   while (i+1 < j) {
  //     // invariant P: 0 <= i < j <= n and (a[i] <= key < a[j] or Q)
  //     // with      Q: for all i: 0 <= i < n: key < a[i]
  //     // where a stands for the array and assuming that the (nonexistent)
  //     // element a[n] is infinitely big.
  //     int h = (i + j) >> 1;
  //     // i < h < j
  //     if (key < array[h].fast_match()) {
  //       j = h;
  //     } else {
  //       i = h;
  //     }
  //   }
  //   // R: a[i] <= key < a[i+1] or Q
  //   // (i.e., if key is within array, i is the correct index)
  //   return i;
  // }

  // Register allocation
  // Note: Since we use the indices in address operands, we do all the
  // computation in 64 bits.
  const Register key   = Z_tos; // Already set (tosca).
  const Register array = Z_tmp_1;
  const Register i     = Z_tmp_2;
  const Register j     = Z_ARG5;
  const Register h     = Z_ARG4;
  const Register temp  = Z_R1_scratch;

  // Find array start.
  __ load_address(array, at_bcp(3 * BytesPerInt));
  __ z_nill(array, (-BytesPerInt) & 0xffff);  // align

  // Initialize i & j.
  __ clear_reg(i, true, false);  // i = 0; Don't set CC.
  __ mem2reg_signed_opt(j, Address(array, -BytesPerInt));  // j = length(array);

  // And start.
  Label entry;
  __ z_bru(entry);

  // binary search loop
  {
    NearLabel loop;

    __ bind(loop);

    // int h = (i + j) >> 1;
    __ add2reg_with_index(h, 0, i, j);  // h = i + j;
    __ z_srag(h, h, 1);                 // h = (i + j) >> 1;

    // if (key < array[h].fast_match()) {
    //   j = h;
    // } else {
    //   i = h;
    // }

    // Convert array[h].match to native byte-ordering before compare.
    __ z_sllg(temp, h, LogBytesPerWord);  // index2bytes
    __ mem2reg_opt(temp, Address(array, temp), false);

    NearLabel else_;

    __ compare32_and_branch(key, temp, Assembler::bcondNotLow, else_);
    // j = h if (key < array[h].fast_match())
    __ z_lgr(j, h);
    __ z_bru(entry);  // continue

    __ bind(else_);

    // i = h if (key >= array[h].fast_match())
    __ z_lgr(i, h);  // and fallthrough

    // while (i+1 < j)
    __ bind(entry);

    // if (i + 1 < j) continue search
    __ add2reg(h, 1, i);
    __ compare64_and_branch(h, j, Assembler::bcondLow, loop);
  }

  // End of binary search, result index is i (must check again!).
  NearLabel default_case;

  // h is no longer needed, so use it to hold the byte offset.
  __ z_sllg(h, i, LogBytesPerWord);  // index2bytes
  __ mem2reg_opt(temp, Address(array, h), false);
  __ compare32_and_branch(key, temp, Assembler::bcondNotEqual, default_case);

  // entry found -> j = offset
  __ mem2reg_signed_opt(j, Address(array, h, BytesPerInt));
  __ profile_switch_case(i, key, array, Z_bytecode);
  // Load next bytecode.
  __ z_llgc(Z_bytecode, Address(Z_bcp, j));
  __ z_agr(Z_bcp, j);  // Advance bcp.
  __ dispatch_only(vtos);

  // default case -> j = default offset
  __ bind(default_case);

  __ profile_switch_default(i);
  __ mem2reg_signed_opt(j, Address(array, -2 * BytesPerInt));
  // Load next bytecode.
  __ z_llgc(Z_bytecode, Address(Z_bcp, j));
  __ z_agr(Z_bcp, j);  // Advance bcp.
  __ dispatch_only(vtos);
}

void TemplateTable::_return(TosState state) {
  transition(state, state);
  assert(_desc->calls_vm(),
         "inconsistent calls_vm information");  // call in remove_activation

  if (_desc->bytecode() == Bytecodes::_return_register_finalizer) {
    Register Rthis  = Z_ARG2;
    Register Rklass = Z_ARG5;
    Label skip_register_finalizer;
    assert(state == vtos, "only valid state");
    __ z_lg(Rthis, aaddress(0));
    __ load_klass(Rklass, Rthis);
    __ testbit(Address(Rklass, Klass::access_flags_offset()), exact_log2(JVM_ACC_HAS_FINALIZER));
    __ z_bfalse(skip_register_finalizer);
    __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::register_finalizer), Rthis);
    __ bind(skip_register_finalizer);
  }

  if (state == itos) {
    // Narrow result if state is itos but result type is smaller.
    // Need to narrow in the return bytecode rather than in generate_return_entry
    // since compiled code callers expect the result to already be narrowed.
    __ narrow(Z_tos, Z_tmp_1); /* fall through */
  }

  __ remove_activation(state, Z_R14);
  __ z_br(Z_R14);
}

// ----------------------------------------------------------------------------
// NOTE: Cpe_offset is already computed as byte offset, so we must not
// shift it afterwards!
void TemplateTable::resolve_cache_and_index(int byte_no,
                                            Register Rcache,
                                            Register cpe_offset,
                                            size_t index_size) {
  BLOCK_COMMENT("resolve_cache_and_index {");
  NearLabel resolved;
  const Register bytecode_in_cpcache = Z_R1_scratch;
  const int total_f1_offset = in_bytes(ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::f1_offset());
  assert_different_registers(Rcache, cpe_offset, bytecode_in_cpcache);

  Bytecodes::Code code = bytecode();
  switch (code) {
    case Bytecodes::_nofast_getfield: code = Bytecodes::_getfield; break;
    case Bytecodes::_nofast_putfield: code = Bytecodes::_putfield; break;
  }

  {
    assert(byte_no == f1_byte || byte_no == f2_byte, "byte_no out of range");
    __ get_cache_and_index_and_bytecode_at_bcp(Rcache, cpe_offset, bytecode_in_cpcache, byte_no, 1, index_size);
    // Have we resolved this bytecode?
    __ compare32_and_branch(bytecode_in_cpcache, (int)code, Assembler::bcondEqual, resolved);
  }

  // Resolve first time through.
  address entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_from_cache);
  __ load_const_optimized(Z_ARG2, (int) code);
  __ call_VM(noreg, entry, Z_ARG2);

  // Update registers with resolved info.
  __ get_cache_and_index_at_bcp(Rcache, cpe_offset, 1, index_size);
  __ bind(resolved);
  BLOCK_COMMENT("} resolve_cache_and_index");
}

// The Rcache and index registers must be set before the call.
// Index is already a byte offset, don't shift!
void TemplateTable::load_field_cp_cache_entry(Register obj,
                                              Register cache,
                                              Register index,
                                              Register off,
                                              Register flags,
                                              bool is_static = false) {
  assert_different_registers(cache, index, flags, off);
  ByteSize cp_base_offset = ConstantPoolCache::base_offset();

  // Field offset
  __ mem2reg_opt(off, Address(cache, index, cp_base_offset + ConstantPoolCacheEntry::f2_offset()));
  // Flags. Must load 64 bits.
  __ mem2reg_opt(flags, Address(cache, index, cp_base_offset + ConstantPoolCacheEntry::flags_offset()));

  // klass overwrite register
  if (is_static) {
    __ mem2reg_opt(obj, Address(cache, index, cp_base_offset + ConstantPoolCacheEntry::f1_offset()));
    __ mem2reg_opt(obj, Address(obj, Klass::java_mirror_offset()));
    __ resolve_oop_handle(obj);
  }
}

void TemplateTable::load_invoke_cp_cache_entry(int byte_no,
                                               Register method,
                                               Register itable_index,
                                               Register flags,
                                               bool is_invokevirtual,
                                               bool is_invokevfinal, // unused
                                               bool is_invokedynamic) {
  BLOCK_COMMENT("load_invoke_cp_cache_entry {");
  // Setup registers.
  const Register cache      = Z_ARG1;
  const Register cpe_offset = flags;
  const ByteSize base_off   = ConstantPoolCache::base_offset();
  const ByteSize f1_off     = ConstantPoolCacheEntry::f1_offset();
  const ByteSize f2_off     = ConstantPoolCacheEntry::f2_offset();
  const ByteSize flags_off  = ConstantPoolCacheEntry::flags_offset();
  const int method_offset   = in_bytes(base_off + ((byte_no == f2_byte) ? f2_off : f1_off));
  const int flags_offset    = in_bytes(base_off + flags_off);
  // Access constant pool cache fields.
  const int index_offset    = in_bytes(base_off + f2_off);

  assert_different_registers(method, itable_index, flags, cache);
  assert(is_invokevirtual == (byte_no == f2_byte), "is_invokevirtual flag redundant");

  if (is_invokevfinal) {
    // Already resolved.
    assert(itable_index == noreg, "register not used");
    __ get_cache_and_index_at_bcp(cache, cpe_offset, 1);
  } else {
    // Need to resolve.
    resolve_cache_and_index(byte_no, cache, cpe_offset, is_invokedynamic ? sizeof(u4) : sizeof(u2));
  }
  __ z_lg(method, Address(cache, cpe_offset, method_offset));

  if (itable_index != noreg) {
    __ z_lg(itable_index, Address(cache, cpe_offset, index_offset));
  }

  // Only load the lower 4 bytes and fill high bytes of flags with zeros.
  // Callers depend on this zero-extension!!!
  // Attention: overwrites cpe_offset == flags
  __ z_llgf(flags, Address(cache, cpe_offset, flags_offset + (BytesPerLong-BytesPerInt)));

  BLOCK_COMMENT("} load_invoke_cp_cache_entry");
}

// The cache and index registers are expected to be set before the call.
// Correct values of the cache and index registers are preserved.
void TemplateTable::jvmti_post_field_access(Register cache, Register index,
                                            bool is_static, bool has_tos) {

  // Do the JVMTI work here to avoid disturbing the register state below.
  // We use the Z_ARG registers here because they are the ones used for
  // the arguments in the call to the VM.
  if (!JvmtiExport::can_post_field_access()) {
    return;
  }

  // Check to see if a field access watch has been set before we
  // take the time to call into the VM.
  Label exit;
  assert_different_registers(cache, index, Z_tos);
  __ load_absolute_address(Z_tos, (address)JvmtiExport::get_field_access_count_addr());
  __ load_and_test_int(Z_R0, Address(Z_tos));
  __ z_brz(exit);

  // Index is returned as byte offset, do not shift!
  __ get_cache_and_index_at_bcp(Z_ARG3, Z_R1_scratch, 1);

  // cache entry pointer
  __ add2reg_with_index(Z_ARG3,
                        in_bytes(ConstantPoolCache::base_offset()),
                        Z_ARG3, Z_R1_scratch);

  if (is_static) {
    __ clear_reg(Z_ARG2, true, false);  // NULL object reference. Don't set CC.
  } else {
    __ mem2reg_opt(Z_ARG2, at_tos());  // Get object pointer without popping it.
    __ verify_oop(Z_ARG2);
  }
  // Z_ARG2: object pointer or NULL
  // Z_ARG3: cache entry pointer
  __ call_VM(noreg,
             CAST_FROM_FN_PTR(address, InterpreterRuntime::post_field_access),
             Z_ARG2, Z_ARG3);
  __ get_cache_and_index_at_bcp(cache, index, 1);

  __ bind(exit);
}

void TemplateTable::pop_and_check_object(Register r) {
  __ pop_ptr(r);
  __ null_check(r);  // for field access must check obj.
  __ verify_oop(r);
}

void TemplateTable::getfield_or_static(int byte_no, bool is_static, RewriteControl rc) {
  transition(vtos, vtos);

  const Register cache = Z_tmp_1;
  const Register index = Z_tmp_2;
  const Register obj   = Z_tmp_1;
  const Register off   = Z_ARG2;
  const Register flags = Z_ARG1;
  const Register bc    = Z_tmp_1;  // Uses same reg as obj, so don't mix them.

  resolve_cache_and_index(byte_no, cache, index, sizeof(u2));
  jvmti_post_field_access(cache, index, is_static, false);
  load_field_cp_cache_entry(obj, cache, index, off, flags, is_static);

  if (!is_static) {
    // Obj is on the stack.
    pop_and_check_object(obj);
  }

  // Displacement is 0, so any load instruction will be fine on any CPU.
  const Address field(obj, off);

  Label    is_Byte, is_Bool, is_Int, is_Short, is_Char,
           is_Long, is_Float, is_Object, is_Double;
  Label    is_badState8, is_badState9, is_badStateA, is_badStateB,
           is_badStateC, is_badStateD, is_badStateE, is_badStateF,
           is_badState;
  Label    branchTable, atosHandler, Done;
  Register br_tab       = Z_R1_scratch;
  bool     do_rewrite   = !is_static && (rc == may_rewrite);
  bool     dont_rewrite = (is_static || (rc == may_not_rewrite));

  assert(do_rewrite == !dont_rewrite, "Oops, code is not fit for that");
  assert(btos == 0, "change code, btos != 0");

  // Calculate branch table size. Generated code size depends on ASSERT and on bytecode rewriting.
#ifdef ASSERT
  const unsigned int bsize = dont_rewrite ? BTB_MINSIZE*1 : BTB_MINSIZE*4;
#else
  const unsigned int bsize = dont_rewrite ? BTB_MINSIZE*1 : BTB_MINSIZE*4;
#endif

  // Calculate address of branch table entry and branch there.
  {
    const int bit_shift = exact_log2(bsize); // Size of each branch table entry.
    const int r_bitpos  = 63 - bit_shift;
    const int l_bitpos  = r_bitpos - ConstantPoolCacheEntry::tos_state_bits + 1;
    const int n_rotate  = (bit_shift-ConstantPoolCacheEntry::tos_state_shift);
    __ z_larl(br_tab, branchTable);
    __ rotate_then_insert(flags, flags, l_bitpos, r_bitpos, n_rotate, true);
  }
  __ z_bc(Assembler::bcondAlways, 0, flags, br_tab);

  __ align_address(bsize);
  BIND(branchTable);

  // btos
  BTB_BEGIN(is_Byte, bsize, "getfield_or_static:is_Byte");
  __ z_lb(Z_tos, field);
  __ push(btos);
  // Rewrite bytecode to be faster.
  if (do_rewrite) {
    patch_bytecode(Bytecodes::_fast_bgetfield, bc, Z_ARG5);
  }
  __ z_bru(Done);
  BTB_END(is_Byte, bsize, "getfield_or_static:is_Byte");

  // ztos
  BTB_BEGIN(is_Bool, bsize, "getfield_or_static:is_Bool");
  __ z_lb(Z_tos, field);
  __ push(ztos);
  // Rewrite bytecode to be faster.
  if (do_rewrite) {
    // Use btos rewriting, no truncating to t/f bit is needed for getfield.
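    // (Booleans are stored as single bytes; truncation to the 0/1 bit only
    // matters on the write side, so the btos fast bytecode can serve ztos
    // reads unchanged.)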
    patch_bytecode(Bytecodes::_fast_bgetfield, bc, Z_ARG5);
  }
  __ z_bru(Done);
  BTB_END(is_Bool, bsize, "getfield_or_static:is_Bool");

  // ctos
  BTB_BEGIN(is_Char, bsize, "getfield_or_static:is_Char");
  // Load into 64 bits, works on all CPUs.
  __ z_llgh(Z_tos, field);
  __ push(ctos);
  // Rewrite bytecode to be faster.
  if (do_rewrite) {
    patch_bytecode(Bytecodes::_fast_cgetfield, bc, Z_ARG5);
  }
  __ z_bru(Done);
  BTB_END(is_Char, bsize, "getfield_or_static:is_Char");

  // stos
  BTB_BEGIN(is_Short, bsize, "getfield_or_static:is_Short");
  __ z_lh(Z_tos, field);
  __ push(stos);
  // Rewrite bytecode to be faster.
  if (do_rewrite) {
    patch_bytecode(Bytecodes::_fast_sgetfield, bc, Z_ARG5);
  }
  __ z_bru(Done);
  BTB_END(is_Short, bsize, "getfield_or_static:is_Short");

  // itos
  BTB_BEGIN(is_Int, bsize, "getfield_or_static:is_Int");
  __ mem2reg_opt(Z_tos, field, false);
  __ push(itos);
  // Rewrite bytecode to be faster.
  if (do_rewrite) {
    patch_bytecode(Bytecodes::_fast_igetfield, bc, Z_ARG5);
  }
  __ z_bru(Done);
  BTB_END(is_Int, bsize, "getfield_or_static:is_Int");

  // ltos
  BTB_BEGIN(is_Long, bsize, "getfield_or_static:is_Long");
  __ mem2reg_opt(Z_tos, field);
  __ push(ltos);
  // Rewrite bytecode to be faster.
  if (do_rewrite) {
    patch_bytecode(Bytecodes::_fast_lgetfield, bc, Z_ARG5);
  }
  __ z_bru(Done);
  BTB_END(is_Long, bsize, "getfield_or_static:is_Long");

  // ftos
  BTB_BEGIN(is_Float, bsize, "getfield_or_static:is_Float");
  __ mem2freg_opt(Z_ftos, field, false);
  __ push(ftos);
  // Rewrite bytecode to be faster.
  if (do_rewrite) {
    patch_bytecode(Bytecodes::_fast_fgetfield, bc, Z_ARG5);
  }
  __ z_bru(Done);
  BTB_END(is_Float, bsize, "getfield_or_static:is_Float");

  // dtos
  BTB_BEGIN(is_Double, bsize, "getfield_or_static:is_Double");
  __ mem2freg_opt(Z_ftos, field);
  __ push(dtos);
  // Rewrite bytecode to be faster.
  if (do_rewrite) {
    patch_bytecode(Bytecodes::_fast_dgetfield, bc, Z_ARG5);
  }
  __ z_bru(Done);
  BTB_END(is_Double, bsize, "getfield_or_static:is_Double");

  // atos
  BTB_BEGIN(is_Object, bsize, "getfield_or_static:is_Object");
  __ z_bru(atosHandler);
  BTB_END(is_Object, bsize, "getfield_or_static:is_Object");

  // Bad state detection comes at no extra runtime cost.
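  // The table is indexed by the tos_state field of the cache entry
  // (ConstantPoolCacheEntry::tos_state_bits bits wide), so the computed branch
  // can formally reach more slots than there are legal type states. The
  // leftover slots are filled with traps below, so a corrupted cache entry
  // faults immediately instead of falling into a random handler. The dispatch
  // math above, as a sketch:
  //   slot   = (flags >> tos_state_shift) & right_n_bits(tos_state_bits);
  //   target = br_tab + slot * bsize;   // bsize: aligned size of one block.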
  BTB_BEGIN(is_badState8, bsize, "getfield_or_static:is_badState8");
  __ z_illtrap();
  __ z_bru(is_badState);
  BTB_END( is_badState8, bsize, "getfield_or_static:is_badState8");
  BTB_BEGIN(is_badState9, bsize, "getfield_or_static:is_badState9");
  __ z_illtrap();
  __ z_bru(is_badState);
  BTB_END( is_badState9, bsize, "getfield_or_static:is_badState9");
  BTB_BEGIN(is_badStateA, bsize, "getfield_or_static:is_badStateA");
  __ z_illtrap();
  __ z_bru(is_badState);
  BTB_END( is_badStateA, bsize, "getfield_or_static:is_badStateA");
  BTB_BEGIN(is_badStateB, bsize, "getfield_or_static:is_badStateB");
  __ z_illtrap();
  __ z_bru(is_badState);
  BTB_END( is_badStateB, bsize, "getfield_or_static:is_badStateB");
  BTB_BEGIN(is_badStateC, bsize, "getfield_or_static:is_badStateC");
  __ z_illtrap();
  __ z_bru(is_badState);
  BTB_END( is_badStateC, bsize, "getfield_or_static:is_badStateC");
  BTB_BEGIN(is_badStateD, bsize, "getfield_or_static:is_badStateD");
  __ z_illtrap();
  __ z_bru(is_badState);
  BTB_END( is_badStateD, bsize, "getfield_or_static:is_badStateD");
  BTB_BEGIN(is_badStateE, bsize, "getfield_or_static:is_badStateE");
  __ z_illtrap();
  __ z_bru(is_badState);
  BTB_END( is_badStateE, bsize, "getfield_or_static:is_badStateE");
  BTB_BEGIN(is_badStateF, bsize, "getfield_or_static:is_badStateF");
  __ z_illtrap();
  __ z_bru(is_badState);
  BTB_END( is_badStateF, bsize, "getfield_or_static:is_badStateF");

  __ align_address(64);
  BIND(is_badState);  // Do this outside branch table. Needs a lot of space.
  {
    unsigned int b_off = __ offset();
    if (is_static) {
      __ stop_static("Bad state in getstatic");
    } else {
      __ stop_static("Bad state in getfield");
    }
    unsigned int e_off = __ offset();
  }

  __ align_address(64);
  BIND(atosHandler);  // Oops are really complicated to handle.
                      // There is a lot of code generated.
                      // Therefore: generate the handler outside of branch table.
                      // There is no performance penalty. The additional branch
                      // to here is compensated for by the fallthru to "Done".
  {
    unsigned int b_off = __ offset();
    __ load_heap_oop(Z_tos, field);
    __ verify_oop(Z_tos);
    __ push(atos);
    if (do_rewrite) {
      patch_bytecode(Bytecodes::_fast_agetfield, bc, Z_ARG5);
    }
    unsigned int e_off = __ offset();
  }

  BIND(Done);
}

void TemplateTable::getfield(int byte_no) {
  BLOCK_COMMENT("getfield {");
  getfield_or_static(byte_no, false);
  BLOCK_COMMENT("} getfield");
}

void TemplateTable::nofast_getfield(int byte_no) {
  getfield_or_static(byte_no, false, may_not_rewrite);
}

void TemplateTable::getstatic(int byte_no) {
  BLOCK_COMMENT("getstatic {");
  getfield_or_static(byte_no, true);
  BLOCK_COMMENT("} getstatic");
}

// The cache and index registers are expected to be set before the call.
// The function may destroy various registers, just not the cache and
// index registers.
void TemplateTable::jvmti_post_field_mod(Register cache,
                                         Register index, bool is_static) {
  transition(vtos, vtos);

  if (!JvmtiExport::can_post_field_modification()) {
    return;
  }

  BLOCK_COMMENT("jvmti_post_field_mod {");

  // Check to see if a field modification watch has been set before
  // we take the time to call into the VM.
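  // A sketch of the check that follows (JvmtiExport keeps a global counter of
  // active field modification watches):
  //   if (*JvmtiExport::get_field_modification_count_addr() == 0) return;
  //   InterpreterRuntime::post_field_modification(obj, cache_entry, jvalue_ptr);
  // Reading the counter is cheap, so we only pay for the VM transition when
  // an agent has actually installed a watch.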
2751 Label L1; 2752 ByteSize cp_base_offset = ConstantPoolCache::base_offset(); 2753 assert_different_registers(cache, index, Z_tos); 2754 2755 __ load_absolute_address(Z_tos, (address)JvmtiExport::get_field_modification_count_addr()); 2756 __ load_and_test_int(Z_R0, Address(Z_tos)); 2757 __ z_brz(L1); 2758 2759 // Index is returned as byte offset, do not shift! 2760 __ get_cache_and_index_at_bcp(Z_ARG3, Z_R1_scratch, 1); 2761 2762 if (is_static) { 2763 // Life is simple. Null out the object pointer. 2764 __ clear_reg(Z_ARG2, true, false); // Don't set CC. 2765 } else { 2766 // Life is harder. The stack holds the value on top, followed by 2767 // the object. We don't know the size of the value, though. It 2768 // could be one or two words depending on its type. As a result, 2769 // we must find the type to determine where the object is. 2770 __ mem2reg_opt(Z_ARG4, 2771 Address(Z_ARG3, Z_R1_scratch, 2772 in_bytes(cp_base_offset + ConstantPoolCacheEntry::flags_offset()) + 2773 (BytesPerLong - BytesPerInt)), 2774 false); 2775 __ z_srl(Z_ARG4, ConstantPoolCacheEntry::tos_state_shift); 2776 // Make sure we don't need to mask Z_ARG4 for tos_state after the above shift. 2777 ConstantPoolCacheEntry::verify_tos_state_shift(); 2778 __ mem2reg_opt(Z_ARG2, at_tos(1)); // Initially assume a one word jvalue. 2779 2780 NearLabel load_dtos, cont; 2781 2782 __ compareU32_and_branch(Z_ARG4, (intptr_t) ltos, 2783 Assembler::bcondNotEqual, load_dtos); 2784 __ mem2reg_opt(Z_ARG2, at_tos(2)); // ltos (two word jvalue) 2785 __ z_bru(cont); 2786 2787 __ bind(load_dtos); 2788 __ compareU32_and_branch(Z_ARG4, (intptr_t)dtos, Assembler::bcondNotEqual, cont); 2789 __ mem2reg_opt(Z_ARG2, at_tos(2)); // dtos (two word jvalue) 2790 2791 __ bind(cont); 2792 } 2793 // cache entry pointer 2794 2795 __ add2reg_with_index(Z_ARG3, in_bytes(cp_base_offset), Z_ARG3, Z_R1_scratch); 2796 2797 // object(tos) 2798 __ load_address(Z_ARG4, Address(Z_esp, Interpreter::stackElementSize)); 2799 // Z_ARG2: object pointer set up above (NULL if static) 2800 // Z_ARG3: cache entry pointer 2801 // Z_ARG4: jvalue object on the stack 2802 __ call_VM(noreg, 2803 CAST_FROM_FN_PTR(address, InterpreterRuntime::post_field_modification), 2804 Z_ARG2, Z_ARG3, Z_ARG4); 2805 __ get_cache_and_index_at_bcp(cache, index, 1); 2806 2807 __ bind(L1); 2808 BLOCK_COMMENT("} jvmti_post_field_mod"); 2809 } 2810 2811 2812 void TemplateTable::putfield_or_static(int byte_no, bool is_static, RewriteControl rc) { 2813 transition(vtos, vtos); 2814 2815 const Register cache = Z_tmp_1; 2816 const Register index = Z_ARG5; 2817 const Register obj = Z_tmp_1; 2818 const Register off = Z_tmp_2; 2819 const Register flags = Z_R1_scratch; 2820 const Register br_tab = Z_ARG5; 2821 const Register bc = Z_tmp_1; 2822 const Register oopStore_tmp1 = Z_R1_scratch; 2823 const Register oopStore_tmp2 = Z_ARG5; 2824 const Register oopStore_tmp3 = Z_R0_scratch; 2825 2826 resolve_cache_and_index(byte_no, cache, index, sizeof(u2)); 2827 jvmti_post_field_mod(cache, index, is_static); 2828 load_field_cp_cache_entry(obj, cache, index, off, flags, is_static); 2829 // begin of life for: 2830 // obj, off long life range 2831 // flags short life range, up to branch into branch table 2832 // end of life for: 2833 // cache, index 2834 2835 const Address field(obj, off); 2836 Label is_Byte, is_Bool, is_Int, is_Short, is_Char, 2837 is_Long, is_Float, is_Object, is_Double; 2838 Label is_badState8, is_badState9, is_badStateA, is_badStateB, 2839 is_badStateC, is_badStateD, is_badStateE, is_badStateF, 2840 
is_badState; 2841 Label branchTable, atosHandler, Done; 2842 bool do_rewrite = !is_static && (rc == may_rewrite); 2843 bool dont_rewrite = (is_static || (rc == may_not_rewrite)); 2844 2845 assert(do_rewrite == !dont_rewrite, "Oops, code is not fit for that"); 2846 2847 assert(btos == 0, "change code, btos != 0"); 2848 2849 #ifdef ASSERT 2850 const unsigned int bsize = is_static ? BTB_MINSIZE*1 : BTB_MINSIZE*4; 2851 #else 2852 const unsigned int bsize = is_static ? BTB_MINSIZE*1 : BTB_MINSIZE*8; 2853 #endif 2854 2855 // Calculate address of branch table entry and branch there. 2856 { 2857 const int bit_shift = exact_log2(bsize); // Size of each branch table entry. 2858 const int r_bitpos = 63 - bit_shift; 2859 const int l_bitpos = r_bitpos - ConstantPoolCacheEntry::tos_state_bits + 1; 2860 const int n_rotate = (bit_shift-ConstantPoolCacheEntry::tos_state_shift); 2861 __ z_larl(br_tab, branchTable); 2862 __ rotate_then_insert(flags, flags, l_bitpos, r_bitpos, n_rotate, true); 2863 __ z_bc(Assembler::bcondAlways, 0, flags, br_tab); 2864 } 2865 // end of life for: 2866 // flags, br_tab 2867 2868 __ align_address(bsize); 2869 BIND(branchTable); 2870 2871 // btos 2872 BTB_BEGIN(is_Byte, bsize, "putfield_or_static:is_Byte"); 2873 __ pop(btos); 2874 if (!is_static) { 2875 pop_and_check_object(obj); 2876 } 2877 __ z_stc(Z_tos, field); 2878 if (do_rewrite) { 2879 patch_bytecode(Bytecodes::_fast_bputfield, bc, Z_ARG5, true, byte_no); 2880 } 2881 __ z_bru(Done); 2882 BTB_END( is_Byte, bsize, "putfield_or_static:is_Byte"); 2883 2884 // ztos 2885 BTB_BEGIN(is_Bool, bsize, "putfield_or_static:is_Bool"); 2886 __ pop(ztos); 2887 if (!is_static) { 2888 pop_and_check_object(obj); 2889 } 2890 __ z_nilf(Z_tos, 0x1); 2891 __ z_stc(Z_tos, field); 2892 if (do_rewrite) { 2893 patch_bytecode(Bytecodes::_fast_zputfield, bc, Z_ARG5, true, byte_no); 2894 } 2895 __ z_bru(Done); 2896 BTB_END(is_Bool, bsize, "putfield_or_static:is_Bool"); 2897 2898 // ctos 2899 BTB_BEGIN(is_Char, bsize, "putfield_or_static:is_Char"); 2900 __ pop(ctos); 2901 if (!is_static) { 2902 pop_and_check_object(obj); 2903 } 2904 __ z_sth(Z_tos, field); 2905 if (do_rewrite) { 2906 patch_bytecode(Bytecodes::_fast_cputfield, bc, Z_ARG5, true, byte_no); 2907 } 2908 __ z_bru(Done); 2909 BTB_END( is_Char, bsize, "putfield_or_static:is_Char"); 2910 2911 // stos 2912 BTB_BEGIN(is_Short, bsize, "putfield_or_static:is_Short"); 2913 __ pop(stos); 2914 if (!is_static) { 2915 pop_and_check_object(obj); 2916 } 2917 __ z_sth(Z_tos, field); 2918 if (do_rewrite) { 2919 patch_bytecode(Bytecodes::_fast_sputfield, bc, Z_ARG5, true, byte_no); 2920 } 2921 __ z_bru(Done); 2922 BTB_END( is_Short, bsize, "putfield_or_static:is_Short"); 2923 2924 // itos 2925 BTB_BEGIN(is_Int, bsize, "putfield_or_static:is_Int"); 2926 __ pop(itos); 2927 if (!is_static) { 2928 pop_and_check_object(obj); 2929 } 2930 __ reg2mem_opt(Z_tos, field, false); 2931 if (do_rewrite) { 2932 patch_bytecode(Bytecodes::_fast_iputfield, bc, Z_ARG5, true, byte_no); 2933 } 2934 __ z_bru(Done); 2935 BTB_END( is_Int, bsize, "putfield_or_static:is_Int"); 2936 2937 // ltos 2938 BTB_BEGIN(is_Long, bsize, "putfield_or_static:is_Long"); 2939 __ pop(ltos); 2940 if (!is_static) { 2941 pop_and_check_object(obj); 2942 } 2943 __ reg2mem_opt(Z_tos, field); 2944 if (do_rewrite) { 2945 patch_bytecode(Bytecodes::_fast_lputfield, bc, Z_ARG5, true, byte_no); 2946 } 2947 __ z_bru(Done); 2948 BTB_END( is_Long, bsize, "putfield_or_static:is_Long"); 2949 2950 // ftos 2951 BTB_BEGIN(is_Float, bsize, "putfield_or_static:is_Float"); 
2952 __ pop(ftos); 2953 if (!is_static) { 2954 pop_and_check_object(obj); 2955 } 2956 __ freg2mem_opt(Z_ftos, field, false); 2957 if (do_rewrite) { 2958 patch_bytecode(Bytecodes::_fast_fputfield, bc, Z_ARG5, true, byte_no); 2959 } 2960 __ z_bru(Done); 2961 BTB_END( is_Float, bsize, "putfield_or_static:is_Float"); 2962 2963 // dtos 2964 BTB_BEGIN(is_Double, bsize, "putfield_or_static:is_Double"); 2965 __ pop(dtos); 2966 if (!is_static) { 2967 pop_and_check_object(obj); 2968 } 2969 __ freg2mem_opt(Z_ftos, field); 2970 if (do_rewrite) { 2971 patch_bytecode(Bytecodes::_fast_dputfield, bc, Z_ARG5, true, byte_no); 2972 } 2973 __ z_bru(Done); 2974 BTB_END( is_Double, bsize, "putfield_or_static:is_Double"); 2975 2976 // atos 2977 BTB_BEGIN(is_Object, bsize, "putfield_or_static:is_Object"); 2978 __ z_bru(atosHandler); 2979 BTB_END( is_Object, bsize, "putfield_or_static:is_Object"); 2980 2981 // Bad state detection comes at no extra runtime cost. 2982 BTB_BEGIN(is_badState8, bsize, "putfield_or_static:is_badState8"); 2983 __ z_illtrap(); 2984 __ z_bru(is_badState); 2985 BTB_END( is_badState8, bsize, "putfield_or_static:is_badState8"); 2986 BTB_BEGIN(is_badState9, bsize, "putfield_or_static:is_badState9"); 2987 __ z_illtrap(); 2988 __ z_bru(is_badState); 2989 BTB_END( is_badState9, bsize, "putfield_or_static:is_badState9"); 2990 BTB_BEGIN(is_badStateA, bsize, "putfield_or_static:is_badStateA"); 2991 __ z_illtrap(); 2992 __ z_bru(is_badState); 2993 BTB_END( is_badStateA, bsize, "putfield_or_static:is_badStateA"); 2994 BTB_BEGIN(is_badStateB, bsize, "putfield_or_static:is_badStateB"); 2995 __ z_illtrap(); 2996 __ z_bru(is_badState); 2997 BTB_END( is_badStateB, bsize, "putfield_or_static:is_badStateB"); 2998 BTB_BEGIN(is_badStateC, bsize, "putfield_or_static:is_badStateC"); 2999 __ z_illtrap(); 3000 __ z_bru(is_badState); 3001 BTB_END( is_badStateC, bsize, "putfield_or_static:is_badStateC"); 3002 BTB_BEGIN(is_badStateD, bsize, "putfield_or_static:is_badStateD"); 3003 __ z_illtrap(); 3004 __ z_bru(is_badState); 3005 BTB_END( is_badStateD, bsize, "putfield_or_static:is_badStateD"); 3006 BTB_BEGIN(is_badStateE, bsize, "putfield_or_static:is_badStateE"); 3007 __ z_illtrap(); 3008 __ z_bru(is_badState); 3009 BTB_END( is_badStateE, bsize, "putfield_or_static:is_badStateE"); 3010 BTB_BEGIN(is_badStateF, bsize, "putfield_or_static:is_badStateF"); 3011 __ z_illtrap(); 3012 __ z_bru(is_badState); 3013 BTB_END( is_badStateF, bsize, "putfield_or_static:is_badStateF"); 3014 3015 __ align_address(64); 3016 BIND(is_badState); // Do this outside branch table. Needs a lot of space. 3017 { 3018 unsigned int b_off = __ offset(); 3019 if (is_static) __ stop_static("Bad state in putstatic"); 3020 else __ stop_static("Bad state in putfield"); 3021 unsigned int e_off = __ offset(); 3022 } 3023 3024 __ align_address(64); 3025 BIND(atosHandler); // Oops are really complicated to handle. 3026 // There is a lot of code generated. 3027 // Therefore: generate the handler outside of branch table. 3028 // There is no performance penalty. The additional branch 3029 // to here is compensated for by the fallthru to "Done". 
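  // Unlike the primitive cases above, the oop store must cooperate with the
  // garbage collector: do_oop_store() wraps the actual store in the barrier
  // code required by the configured BarrierSet (e.g. G1 pre/post barriers, or
  // card marking). Conceptually, for a card table barrier:
  //   *(obj + off) = value;
  //   card_table[(obj + off) >> card_shift] = dirty;
  // This is why the atos path cannot be a simple reg2mem_opt().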
3030 { 3031 unsigned int b_off = __ offset(); 3032 __ pop(atos); 3033 if (!is_static) { 3034 pop_and_check_object(obj); 3035 } 3036 // Store into the field 3037 do_oop_store(_masm, obj, off, Z_tos, false, 3038 oopStore_tmp1, oopStore_tmp2, oopStore_tmp3, _bs->kind(), false); 3039 if (do_rewrite) { 3040 patch_bytecode(Bytecodes::_fast_aputfield, bc, Z_ARG5, true, byte_no); 3041 } 3042 // __ z_bru(Done); // fallthru 3043 unsigned int e_off = __ offset(); 3044 } 3045 3046 BIND(Done); 3047 3048 // Check for volatile store. 3049 Label notVolatile; 3050 3051 __ testbit(Z_ARG4, ConstantPoolCacheEntry::is_volatile_shift); 3052 __ z_brz(notVolatile); 3053 __ z_fence(); 3054 3055 BIND(notVolatile); 3056 } 3057 3058 void TemplateTable::putfield(int byte_no) { 3059 BLOCK_COMMENT("putfield {"); 3060 putfield_or_static(byte_no, false); 3061 BLOCK_COMMENT("} putfield"); 3062 } 3063 3064 void TemplateTable::nofast_putfield(int byte_no) { 3065 putfield_or_static(byte_no, false, may_not_rewrite); 3066 } 3067 3068 void TemplateTable::putstatic(int byte_no) { 3069 BLOCK_COMMENT("putstatic {"); 3070 putfield_or_static(byte_no, true); 3071 BLOCK_COMMENT("} putstatic"); 3072 } 3073 3074 // Push the tos value back to the stack. 3075 // gc will find oops there and update. 3076 void TemplateTable::jvmti_post_fast_field_mod() { 3077 3078 if (!JvmtiExport::can_post_field_modification()) { 3079 return; 3080 } 3081 3082 // Check to see if a field modification watch has been set before 3083 // we take the time to call into the VM. 3084 Label exit; 3085 3086 BLOCK_COMMENT("jvmti_post_fast_field_mod {"); 3087 3088 __ load_absolute_address(Z_R1_scratch, 3089 (address) JvmtiExport::get_field_modification_count_addr()); 3090 __ load_and_test_int(Z_R0_scratch, Address(Z_R1_scratch)); 3091 __ z_brz(exit); 3092 3093 Register obj = Z_tmp_1; 3094 3095 __ pop_ptr(obj); // Copy the object pointer from tos. 3096 __ verify_oop(obj); 3097 __ push_ptr(obj); // Put the object pointer back on tos. 3098 3099 // Save tos values before call_VM() clobbers them. Since we have 3100 // to do it for every data type, we use the saved values as the 3101 // jvalue object. 3102 switch (bytecode()) { // Load values into the jvalue object. 3103 case Bytecodes::_fast_aputfield: 3104 __ push_ptr(Z_tos); 3105 break; 3106 case Bytecodes::_fast_bputfield: 3107 case Bytecodes::_fast_zputfield: 3108 case Bytecodes::_fast_sputfield: 3109 case Bytecodes::_fast_cputfield: 3110 case Bytecodes::_fast_iputfield: 3111 __ push_i(Z_tos); 3112 break; 3113 case Bytecodes::_fast_dputfield: 3114 __ push_d(); 3115 break; 3116 case Bytecodes::_fast_fputfield: 3117 __ push_f(); 3118 break; 3119 case Bytecodes::_fast_lputfield: 3120 __ push_l(Z_tos); 3121 break; 3122 3123 default: 3124 ShouldNotReachHere(); 3125 } 3126 3127 // jvalue on the stack 3128 __ load_address(Z_ARG4, Address(Z_esp, Interpreter::stackElementSize)); 3129 // Access constant pool cache entry. 3130 __ get_cache_entry_pointer_at_bcp(Z_ARG3, Z_tos, 1); 3131 __ verify_oop(obj); 3132 3133 // obj : object pointer copied above 3134 // Z_ARG3: cache entry pointer 3135 // Z_ARG4: jvalue object on the stack 3136 __ call_VM(noreg, 3137 CAST_FROM_FN_PTR(address, InterpreterRuntime::post_field_modification), 3138 obj, Z_ARG3, Z_ARG4); 3139 3140 switch (bytecode()) { // Restore tos values. 
3141 case Bytecodes::_fast_aputfield: 3142 __ pop_ptr(Z_tos); 3143 break; 3144 case Bytecodes::_fast_bputfield: 3145 case Bytecodes::_fast_zputfield: 3146 case Bytecodes::_fast_sputfield: 3147 case Bytecodes::_fast_cputfield: 3148 case Bytecodes::_fast_iputfield: 3149 __ pop_i(Z_tos); 3150 break; 3151 case Bytecodes::_fast_dputfield: 3152 __ pop_d(Z_ftos); 3153 break; 3154 case Bytecodes::_fast_fputfield: 3155 __ pop_f(Z_ftos); 3156 break; 3157 case Bytecodes::_fast_lputfield: 3158 __ pop_l(Z_tos); 3159 break; 3160 } 3161 3162 __ bind(exit); 3163 BLOCK_COMMENT("} jvmti_post_fast_field_mod"); 3164 } 3165 3166 void TemplateTable::fast_storefield(TosState state) { 3167 transition(state, vtos); 3168 3169 ByteSize base = ConstantPoolCache::base_offset(); 3170 jvmti_post_fast_field_mod(); 3171 3172 // Access constant pool cache. 3173 Register cache = Z_tmp_1; 3174 Register index = Z_tmp_2; 3175 Register flags = Z_ARG5; 3176 3177 // Index comes in bytes, don't shift afterwards! 3178 __ get_cache_and_index_at_bcp(cache, index, 1); 3179 3180 // Test for volatile. 3181 assert(!flags->is_volatile(), "do_oop_store could perform leaf RT call"); 3182 __ z_lg(flags, Address(cache, index, base + ConstantPoolCacheEntry::flags_offset())); 3183 3184 // Replace index with field offset from cache entry. 3185 Register field_offset = index; 3186 __ z_lg(field_offset, Address(cache, index, base + ConstantPoolCacheEntry::f2_offset())); 3187 3188 // Get object from stack. 3189 Register obj = cache; 3190 3191 pop_and_check_object(obj); 3192 3193 // field address 3194 const Address field(obj, field_offset); 3195 3196 // access field 3197 switch (bytecode()) { 3198 case Bytecodes::_fast_aputfield: 3199 do_oop_store(_masm, obj, field_offset, Z_tos, false, 3200 Z_ARG2, Z_ARG3, Z_ARG4, _bs->kind(), false); 3201 break; 3202 case Bytecodes::_fast_lputfield: 3203 __ reg2mem_opt(Z_tos, field); 3204 break; 3205 case Bytecodes::_fast_iputfield: 3206 __ reg2mem_opt(Z_tos, field, false); 3207 break; 3208 case Bytecodes::_fast_zputfield: 3209 __ z_nilf(Z_tos, 0x1); 3210 // fall through to bputfield 3211 case Bytecodes::_fast_bputfield: 3212 __ z_stc(Z_tos, field); 3213 break; 3214 case Bytecodes::_fast_sputfield: 3215 // fall through 3216 case Bytecodes::_fast_cputfield: 3217 __ z_sth(Z_tos, field); 3218 break; 3219 case Bytecodes::_fast_fputfield: 3220 __ freg2mem_opt(Z_ftos, field, false); 3221 break; 3222 case Bytecodes::_fast_dputfield: 3223 __ freg2mem_opt(Z_ftos, field); 3224 break; 3225 default: 3226 ShouldNotReachHere(); 3227 } 3228 3229 // Check for volatile store. 3230 Label notVolatile; 3231 3232 __ testbit(flags, ConstantPoolCacheEntry::is_volatile_shift); 3233 __ z_brz(notVolatile); 3234 __ z_fence(); 3235 3236 __ bind(notVolatile); 3237 } 3238 3239 void TemplateTable::fast_accessfield(TosState state) { 3240 transition(atos, state); 3241 3242 Register obj = Z_tos; 3243 3244 // Do the JVMTI work here to avoid disturbing the register state below 3245 if (JvmtiExport::can_post_field_access()) { 3246 // Check to see if a field access watch has been set before we 3247 // take the time to call into the VM. 3248 Label cont; 3249 3250 __ load_absolute_address(Z_R1_scratch, 3251 (address)JvmtiExport::get_field_access_count_addr()); 3252 __ load_and_test_int(Z_R0_scratch, Address(Z_R1_scratch)); 3253 __ z_brz(cont); 3254 3255 // Access constant pool cache entry. 3256 3257 __ get_cache_entry_pointer_at_bcp(Z_ARG3, Z_tmp_1, 1); 3258 __ verify_oop(obj); 3259 __ push_ptr(obj); // Save object pointer before call_VM() clobbers it. 
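    // Keeping the oop in a stack slot is not only about register clobbering:
    // call_VM() may reach a safepoint, and a moving collector updates only
    // oops it can find in frame locations it walks, not raw register values.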
3260 __ z_lgr(Z_ARG2, obj); 3261 3262 // Z_ARG2: object pointer copied above 3263 // Z_ARG3: cache entry pointer 3264 __ call_VM(noreg, 3265 CAST_FROM_FN_PTR(address, InterpreterRuntime::post_field_access), 3266 Z_ARG2, Z_ARG3); 3267 __ pop_ptr(obj); // Restore object pointer. 3268 3269 __ bind(cont); 3270 } 3271 3272 // Access constant pool cache. 3273 Register cache = Z_tmp_1; 3274 Register index = Z_tmp_2; 3275 3276 // Index comes in bytes, don't shift afterwards! 3277 __ get_cache_and_index_at_bcp(cache, index, 1); 3278 // Replace index with field offset from cache entry. 3279 __ mem2reg_opt(index, 3280 Address(cache, index, 3281 ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::f2_offset())); 3282 3283 __ verify_oop(obj); 3284 __ null_check(obj); 3285 3286 Address field(obj, index); 3287 3288 // access field 3289 switch (bytecode()) { 3290 case Bytecodes::_fast_agetfield: 3291 __ load_heap_oop(Z_tos, field); 3292 __ verify_oop(Z_tos); 3293 return; 3294 case Bytecodes::_fast_lgetfield: 3295 __ mem2reg_opt(Z_tos, field); 3296 return; 3297 case Bytecodes::_fast_igetfield: 3298 __ mem2reg_opt(Z_tos, field, false); 3299 return; 3300 case Bytecodes::_fast_bgetfield: 3301 __ z_lb(Z_tos, field); 3302 return; 3303 case Bytecodes::_fast_sgetfield: 3304 __ z_lh(Z_tos, field); 3305 return; 3306 case Bytecodes::_fast_cgetfield: 3307 __ z_llgh(Z_tos, field); // Load into 64 bits, works on all CPUs. 3308 return; 3309 case Bytecodes::_fast_fgetfield: 3310 __ mem2freg_opt(Z_ftos, field, false); 3311 return; 3312 case Bytecodes::_fast_dgetfield: 3313 __ mem2freg_opt(Z_ftos, field); 3314 return; 3315 default: 3316 ShouldNotReachHere(); 3317 } 3318 } 3319 3320 void TemplateTable::fast_xaccess(TosState state) { 3321 transition(vtos, state); 3322 3323 Register receiver = Z_tos; 3324 // Get receiver. 3325 __ mem2reg_opt(Z_tos, aaddress(0)); 3326 3327 // Access constant pool cache. 3328 Register cache = Z_tmp_1; 3329 Register index = Z_tmp_2; 3330 3331 // Index comes in bytes, don't shift afterwards! 3332 __ get_cache_and_index_at_bcp(cache, index, 2); 3333 // Replace index with field offset from cache entry. 3334 __ mem2reg_opt(index, 3335 Address(cache, index, 3336 ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::f2_offset())); 3337 3338 // Make sure exception is reported in correct bcp range (getfield is 3339 // next instruction). 3340 __ add2reg(Z_bcp, 1); 3341 __ null_check(receiver); 3342 switch (state) { 3343 case itos: 3344 __ mem2reg_opt(Z_tos, Address(receiver, index), false); 3345 break; 3346 case atos: 3347 __ load_heap_oop(Z_tos, Address(receiver, index)); 3348 __ verify_oop(Z_tos); 3349 break; 3350 case ftos: 3351 __ mem2freg_opt(Z_ftos, Address(receiver, index)); 3352 break; 3353 default: 3354 ShouldNotReachHere(); 3355 } 3356 3357 // Reset bcp to original position. 3358 __ add2reg(Z_bcp, -1); 3359 } 3360 3361 //----------------------------------------------------------------------------- 3362 // Calls 3363 3364 void TemplateTable::prepare_invoke(int byte_no, 3365 Register method, // linked method (or i-klass) 3366 Register index, // itable index, MethodType, etc. 3367 Register recv, // If caller wants to see it. 3368 Register flags) { // If caller wants to test it. 3369 // Determine flags. 
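  // (The cp cache entry flags word is reused heavily below: the tos_state
  // bits select the return entry, single bits encode vfinal/volatile/appendix
  // properties, and the low byte holds the parameter size. Roughly:
  //   [ tos_state | flag bits | parameter_size (low byte) ]
  // See ConstantPoolCacheEntry for the authoritative layout.)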
3370 const Bytecodes::Code code = bytecode(); 3371 const bool is_invokeinterface = code == Bytecodes::_invokeinterface; 3372 const bool is_invokedynamic = code == Bytecodes::_invokedynamic; 3373 const bool is_invokehandle = code == Bytecodes::_invokehandle; 3374 const bool is_invokevirtual = code == Bytecodes::_invokevirtual; 3375 const bool is_invokespecial = code == Bytecodes::_invokespecial; 3376 const bool load_receiver = (recv != noreg); 3377 assert(load_receiver == (code != Bytecodes::_invokestatic && code != Bytecodes::_invokedynamic), ""); 3378 3379 // Setup registers & access constant pool cache. 3380 if (recv == noreg) { recv = Z_ARG1; } 3381 if (flags == noreg) { flags = Z_ARG2; } 3382 assert_different_registers(method, Z_R14, index, recv, flags); 3383 3384 BLOCK_COMMENT("prepare_invoke {"); 3385 3386 load_invoke_cp_cache_entry(byte_no, method, index, flags, is_invokevirtual, false, is_invokedynamic); 3387 3388 // Maybe push appendix to arguments. 3389 if (is_invokedynamic || is_invokehandle) { 3390 Label L_no_push; 3391 Register resolved_reference = Z_R1_scratch; 3392 __ testbit(flags, ConstantPoolCacheEntry::has_appendix_shift); 3393 __ z_bfalse(L_no_push); 3394 // Push the appendix as a trailing parameter. 3395 // This must be done before we get the receiver, 3396 // since the parameter_size includes it. 3397 __ load_resolved_reference_at_index(resolved_reference, index); 3398 __ verify_oop(resolved_reference); 3399 __ push_ptr(resolved_reference); // Push appendix (MethodType, CallSite, etc.). 3400 __ bind(L_no_push); 3401 } 3402 3403 // Load receiver if needed (after appendix is pushed so parameter size is correct). 3404 if (load_receiver) { 3405 assert(!is_invokedynamic, ""); 3406 // recv := int2long(flags & ConstantPoolCacheEntry::parameter_size_mask) << 3 3407 // Flags is zero-extended int2long when loaded during load_invoke_cp_cache_entry(). 3408 // Only the least significant byte (psize) of flags is used. 3409 { 3410 const unsigned int logSES = Interpreter::logStackElementSize; 3411 const int bit_shift = logSES; 3412 const int r_bitpos = 63 - bit_shift; 3413 const int l_bitpos = r_bitpos - ConstantPoolCacheEntry::parameter_size_bits + 1; 3414 const int n_rotate = bit_shift; 3415 assert(ConstantPoolCacheEntry::parameter_size_mask == 255, "adapt bitpositions"); 3416 __ rotate_then_insert(recv, flags, l_bitpos, r_bitpos, n_rotate, true); 3417 } 3418 // Recv now contains #arguments * StackElementSize. 3419 3420 Address recv_addr(Z_esp, recv); 3421 __ z_lg(recv, recv_addr); 3422 __ verify_oop(recv); 3423 } 3424 3425 // Compute return type. 3426 // ret_type is used by callers (invokespecial, invokestatic) at least. 3427 Register ret_type = Z_R1_scratch; 3428 assert_different_registers(ret_type, method); 3429 3430 const address table_addr = (address)Interpreter::invoke_return_entry_table_for(code); 3431 __ load_absolute_address(Z_R14, table_addr); 3432 3433 { 3434 const int bit_shift = LogBytesPerWord; // Size of each table entry. 3435 const int r_bitpos = 63 - bit_shift; 3436 const int l_bitpos = r_bitpos - ConstantPoolCacheEntry::tos_state_bits + 1; 3437 const int n_rotate = bit_shift-ConstantPoolCacheEntry::tos_state_shift; 3438 __ rotate_then_insert(ret_type, flags, l_bitpos, r_bitpos, n_rotate, true); 3439 // Make sure we don't need to mask flags for tos_state after the above shift. 3440 ConstantPoolCacheEntry::verify_tos_state_shift(); 3441 } 3442 3443 __ z_lg(Z_R14, Address(Z_R14, ret_type)); // Load return address. 
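  // In effect, as a sketch:
  //   Z_R14 = invoke_return_entry_table_for(code)[tos_state];
  // i.e. one return entry per result type, so the callee's result is pushed
  // back onto the expression stack with the correct width and tag.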
3444 BLOCK_COMMENT("} prepare_invoke"); 3445 } 3446 3447 3448 void TemplateTable::invokevirtual_helper(Register index, 3449 Register recv, 3450 Register flags) { 3451 // Uses temporary registers Z_tmp_2, Z_ARG4. 3452 assert_different_registers(index, recv, Z_tmp_2, Z_ARG4); 3453 3454 // Test for an invoke of a final method. 3455 Label notFinal; 3456 3457 BLOCK_COMMENT("invokevirtual_helper {"); 3458 3459 __ testbit(flags, ConstantPoolCacheEntry::is_vfinal_shift); 3460 __ z_brz(notFinal); 3461 3462 const Register method = index; // Method must be Z_ARG3. 3463 assert(method == Z_ARG3, "method must be second argument for interpreter calling convention"); 3464 3465 // Do the call - the index is actually the method to call. 3466 // That is, f2 is a vtable index if !is_vfinal, else f2 is a method. 3467 3468 // It's final, need a null check here! 3469 __ null_check(recv); 3470 3471 // Profile this call. 3472 __ profile_final_call(Z_tmp_2); 3473 __ profile_arguments_type(Z_tmp_2, method, Z_ARG5, true); // Argument type profiling. 3474 __ jump_from_interpreted(method, Z_tmp_2); 3475 3476 __ bind(notFinal); 3477 3478 // Get receiver klass. 3479 __ null_check(recv, Z_R0_scratch, oopDesc::klass_offset_in_bytes()); 3480 __ load_klass(Z_tmp_2, recv); 3481 3482 // Profile this call. 3483 __ profile_virtual_call(Z_tmp_2, Z_ARG4, Z_ARG5); 3484 3485 // Get target method & entry point. 3486 __ z_sllg(index, index, exact_log2(vtableEntry::size_in_bytes())); 3487 __ mem2reg_opt(method, 3488 Address(Z_tmp_2, index, 3489 Klass::vtable_start_offset() + in_ByteSize(vtableEntry::method_offset_in_bytes()))); 3490 __ profile_arguments_type(Z_ARG4, method, Z_ARG5, true); 3491 __ jump_from_interpreted(method, Z_ARG4); 3492 BLOCK_COMMENT("} invokevirtual_helper"); 3493 } 3494 3495 void TemplateTable::invokevirtual(int byte_no) { 3496 transition(vtos, vtos); 3497 3498 assert(byte_no == f2_byte, "use this argument"); 3499 prepare_invoke(byte_no, 3500 Z_ARG3, // method or vtable index 3501 noreg, // unused itable index 3502 Z_ARG1, // recv 3503 Z_ARG2); // flags 3504 3505 // Z_ARG3 : index 3506 // Z_ARG1 : receiver 3507 // Z_ARG2 : flags 3508 invokevirtual_helper(Z_ARG3, Z_ARG1, Z_ARG2); 3509 } 3510 3511 void TemplateTable::invokespecial(int byte_no) { 3512 transition(vtos, vtos); 3513 3514 assert(byte_no == f1_byte, "use this argument"); 3515 Register Rmethod = Z_tmp_2; 3516 prepare_invoke(byte_no, Rmethod, noreg, // Get f1 method. 3517 Z_ARG3); // Get receiver also for null check. 3518 __ verify_oop(Z_ARG3); 3519 __ null_check(Z_ARG3); 3520 // Do the call. 3521 __ profile_call(Z_ARG2); 3522 __ profile_arguments_type(Z_ARG2, Rmethod, Z_ARG5, false); 3523 __ jump_from_interpreted(Rmethod, Z_R1_scratch); 3524 } 3525 3526 void TemplateTable::invokestatic(int byte_no) { 3527 transition(vtos, vtos); 3528 3529 assert(byte_no == f1_byte, "use this argument"); 3530 Register Rmethod = Z_tmp_2; 3531 prepare_invoke(byte_no, Rmethod); // Get f1 method. 3532 // Do the call. 3533 __ profile_call(Z_ARG2); 3534 __ profile_arguments_type(Z_ARG2, Rmethod, Z_ARG5, false); 3535 __ jump_from_interpreted(Rmethod, Z_R1_scratch); 3536 } 3537 3538 // Outdated feature, and we don't support it. 
// Outdated feature, and we don't support it.
void TemplateTable::fast_invokevfinal(int byte_no) {
  transition(vtos, vtos);
  assert(byte_no == f2_byte, "use this argument");
  __ stop("fast_invokevfinal not used on linuxs390x");
}

void TemplateTable::invokeinterface(int byte_no) {
  transition(vtos, vtos);

  assert(byte_no == f1_byte, "use this argument");
  Register interface = Z_tos;
  Register index = Z_ARG3;
  Register receiver = Z_tmp_1;
  Register flags = Z_ARG5;

  BLOCK_COMMENT("invokeinterface {");

  // Destroys Z_ARG1 and Z_ARG2, thus use Z_ARG4 and copy afterwards.
  prepare_invoke(byte_no, Z_ARG4, index, // Get f1 klassOop, f2 itable index.
                 receiver, flags);

  // Z_R14 (== Z_bytecode) : return entry

  __ z_lgr(interface, Z_ARG4);

  // Special case of invokeinterface called for virtual method of
  // java.lang.Object. See cpCacheOop.cpp for details.
  // This code isn't produced by javac, but could be produced by
  // another compliant Java compiler.
  Label notMethod;
  __ testbit(flags, ConstantPoolCacheEntry::is_forced_virtual_shift);
  __ z_brz(notMethod);
  invokevirtual_helper(index, receiver, flags);
  __ bind(notMethod);

  // Get receiver klass into klass - also a null check.
  Register klass = flags;

  __ restore_locals();
  __ load_klass(klass, receiver);

  // Profile this call.
  __ profile_virtual_call(klass, Z_ARG2/*mdp*/, Z_ARG4/*scratch*/);

  NearLabel no_such_interface, no_such_method;
  Register method = Z_tmp_2;

  // Save the index in Z_ARG4: it is needed in case of an error
  // in throw_AbstractMethodError below.
  __ z_lgr(Z_ARG4, index);
  // Also copy klass, because it could be modified by
  // lookup_interface_method.
  __ z_lgr(Z_ARG2, klass);
  __ lookup_interface_method(// inputs: rec. class, interface, itable index
                             klass, interface, index,
                             // outputs: method, scan temp. reg
                             method, Z_tmp_2, Z_R1_scratch,
                             no_such_interface);

  // Check for abstract method error.
  // Note: This should be done more efficiently via a throw_abstract_method_error
  // interpreter entry point and a conditional jump to it in case of a null
  // method.
  __ compareU64_and_branch(method, (intptr_t) 0,
                           Assembler::bcondZero, no_such_method);

  __ profile_arguments_type(Z_ARG3, method, Z_ARG5, true);

  // Do the call.
  __ jump_from_interpreted(method, Z_ARG5);
  __ should_not_reach_here();

  // exception handling code follows...
  // Note: Must restore interpreter registers to canonical
  // state for exception handling to work correctly!

  __ bind(no_such_method);

  // Throw exception.
  __ restore_bcp();    // Bcp must be correct for exception handler (was destroyed).
  __ restore_locals(); // Make sure locals pointer is correct as well (was destroyed).
  // Pass the receiver's klass, the interface, and the saved itable index to
  // throw_AbstractMethodError, so it can generate a better error message.
  __ call_VM(noreg,
             CAST_FROM_FN_PTR(address,
                              InterpreterRuntime::throw_AbstractMethodError),
             Z_ARG2, interface, Z_ARG4);
  // The call_VM checks for exception, so we should never return here.
  __ should_not_reach_here();

  __ bind(no_such_interface);

  // Throw exception.
  __ restore_bcp();    // Bcp must be correct for exception handler (was destroyed).
  __ restore_locals(); // Make sure locals pointer is correct as well (was destroyed).
  // Pass the receiver's klass and the interface to
  // throw_IncompatibleClassChangeError, so it can generate a better error message.
  __ call_VM(noreg,
             CAST_FROM_FN_PTR(address,
                              InterpreterRuntime::throw_IncompatibleClassChangeError),
             Z_ARG2, interface);
  // The call_VM checks for exception, so we should never return here.
  __ should_not_reach_here();

  BLOCK_COMMENT("} invokeinterface");
  return;
}
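// Illustration of the two error paths above (informational): both can only
// arise when classes changed after the caller was compiled, e.g.
//   - no_such_method: the interface method resolves to an abstract (or
//     missing) implementation in the receiver's class -> AbstractMethodError.
//   - no_such_interface: the receiver's class does not (or no longer does)
//     implement the interface named at the call site
//     -> IncompatibleClassChangeError.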
void TemplateTable::invokehandle(int byte_no) {
  transition(vtos, vtos);

  const Register method = Z_tmp_2;
  const Register recv   = Z_ARG5;
  const Register mtype  = Z_tmp_1;
  prepare_invoke(byte_no,
                 method, mtype, // Get f2 method, f1 MethodType.
                 recv);
  __ verify_method_ptr(method);
  __ verify_oop(recv);
  __ null_check(recv);

  // Note: Mtype is already pushed (if necessary) by prepare_invoke.

  // FIXME: profile the LambdaForm also.
  __ profile_final_call(Z_ARG2);
  __ profile_arguments_type(Z_ARG3, method, Z_ARG5, true);

  __ jump_from_interpreted(method, Z_ARG3);
}

void TemplateTable::invokedynamic(int byte_no) {
  transition(vtos, vtos);

  const Register Rmethod   = Z_tmp_2;
  const Register Rcallsite = Z_tmp_1;

  prepare_invoke(byte_no, Rmethod, Rcallsite);

  // Rmethod:   MH.linkToCallSite method (from f2)
  // Rcallsite: CallSite object (from f1)

  // Note: Callsite is already pushed by prepare_invoke.

  // TODO: should make a type profile for any invokedynamic that takes a ref argument.
  // Profile this call.
  __ profile_call(Z_ARG2);
  __ profile_arguments_type(Z_ARG2, Rmethod, Z_ARG5, false);
  __ jump_from_interpreted(Rmethod, Z_ARG2);
}
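// invokedynamic at a glance (informational sketch): prepare_invoke pushed the
// resolved CallSite appendix as a hidden trailing argument, so the adapter
// method (MH.linkToCallSite) receives it like an ordinary parameter and
// forwards the call to the CallSite's current target MethodHandle. There is
// no receiver, which is why no null check appears above.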
//-----------------------------------------------------------------------------
// Allocation

// Original comment on "allow_shared_alloc":
// Always go the slow path.
//   + Eliminated optimization within the template-based interpreter:
//     If an allocation is done within the interpreter without using
//     tlabs, the interpreter tries to do the allocation directly
//     on the heap.
//   + That means the profiling hooks are not considered and allocations
//     get lost for the profiling framework.
//   + However, we do not think that this optimization is really needed,
//     so we now always take the slow path through the VM in this case --
//     SPECjbb2005 shows no measurable performance degradation.
void TemplateTable::_new() {
  transition(vtos, atos);
  address prev_instr_address = NULL;
  Register tags = Z_tmp_1;
  Register RallocatedObject = Z_tos;
  Register cpool = Z_ARG2;
  Register tmp = Z_ARG3; // RobjectFields==tmp and Rsize==offset must be a register pair.
  Register offset = Z_ARG4;
  Label slow_case;
  Label done;
  Label initialize_header;
  Label initialize_object; // Including clearing the fields.
  Label allocate_shared;

  BLOCK_COMMENT("TemplateTable::_new {");
  __ get_2_byte_integer_at_bcp(offset/*dest*/, 1, InterpreterMacroAssembler::Unsigned);
  __ get_cpool_and_tags(cpool, tags);
  // Make sure the class we're about to instantiate has been resolved.
  // This is done before loading InstanceKlass to be consistent with the order
  // in which the constant pool is updated (see ConstantPool::klass_at_put).
  const int tags_offset = Array<u1>::base_offset_in_bytes();
  __ load_address(tmp, Address(tags, offset, tags_offset));
  __ z_cli(0, tmp, JVM_CONSTANT_Class);
  __ z_brne(slow_case);

  __ z_sllg(offset, offset, LogBytesPerWord); // Convert index to offset.
  // Get InstanceKlass.
  Register iklass = cpool;
  __ load_resolved_klass_at_offset(cpool, offset, iklass);

  // Make sure klass is fully initialized and doesn't have a finalizer.
  const int state_offset = in_bytes(InstanceKlass::init_state_offset());
  if (Immediate::is_uimm12(state_offset)) {
    __ z_cli(state_offset, iklass, InstanceKlass::fully_initialized);
  } else {
    __ z_cliy(state_offset, iklass, InstanceKlass::fully_initialized);
  }
  __ z_brne(slow_case);

  // Get instance_size in InstanceKlass (scaled to a count of bytes).
  Register Rsize = offset;
  const int mask = 1 << Klass::_lh_instance_slow_path_bit;
  __ z_llgf(Rsize, Address(iklass, Klass::layout_helper_offset()));
  __ z_tmll(Rsize, mask);
  __ z_btrue(slow_case);

  // Allocate the instance:
  //   1) Try to allocate in the TLAB.
  //   2) If that fails and the object is large, allocate in the shared Eden.
  //   3) If the above fails (or is not applicable), go to a slow case
  //      (creates a new TLAB, etc.).

  // Always go the slow path. See comment above this template.
  const bool allow_shared_alloc = false;

  if (UseTLAB) {
    Register RoldTopValue = RallocatedObject;
    Register RnewTopValue = tmp;
    __ z_lg(RoldTopValue, Address(Z_thread, JavaThread::tlab_top_offset()));
    __ load_address(RnewTopValue, Address(RoldTopValue, Rsize));
    __ z_cg(RnewTopValue, Address(Z_thread, JavaThread::tlab_end_offset()));
    __ z_brh(allow_shared_alloc ? allocate_shared : slow_case);
    __ z_stg(RnewTopValue, Address(Z_thread, JavaThread::tlab_top_offset()));
    if (ZeroTLAB) {
      // The fields have already been cleared.
      __ z_bru(initialize_header);
    } else {
      // Initialize both the header and fields.
      if (allow_shared_alloc) {
        __ z_bru(initialize_object);
      } else {
        // Fall through to initialize_object, but assert that it is on the fall-through path.
        prev_instr_address = __ pc();
      }
    }
  }

  if (allow_shared_alloc) {
    // Allocation in the shared Eden is not implemented, because the SAP JVM
    // allocation trace does not allow it.
    Unimplemented();
  }

  if (UseTLAB) {
    Register RobjectFields = tmp;
    Register Rzero = Z_R1_scratch;

    assert(ZeroTLAB || prev_instr_address == __ pc(),
           "must not omit jump to initialize_object above, as it is not on the fall through path");
    __ clear_reg(Rzero, true /*whole reg*/, false); // Load 0L into Rzero. Don't set CC.

    // The object is initialized before the header. If the object size is
    // zero, go directly to the header initialization.
    __ bind(initialize_object);
    __ z_aghi(Rsize, (int)-sizeof(oopDesc)); // Subtract header size, set CC.
    __ z_bre(initialize_header);             // Jump if size of fields is zero.

    // Initialize object fields.
    // See documentation for MVCLE instruction!!!
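    // MVCLE recap (informational; see the z/Architecture Principles of
    // Operation for the authoritative description): MVCLE copies from a
    // source described by an even/odd register pair to a destination
    // described by another even/odd pair, and once the source length is
    // exhausted it stores the padding byte into the remaining destination
    // bytes. With a source length of zero and padding byte 0, as set up
    // below, it degenerates into a pure memset-to-zero of the object fields.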
    assert(RobjectFields->encoding() % 2 == 0, "RobjectFields must be an even register");
    assert(Rsize->encoding() == (RobjectFields->encoding() + 1),
           "RobjectFields and Rsize must be a register pair");
    assert(Rzero->encoding() % 2 == 1, "Rzero must be an odd register");

    // Set Rzero to 0 and use it as source length, then MVCLE will copy nothing
    // and fill the object with the padding value 0.
    __ add2reg(RobjectFields, sizeof(oopDesc), RallocatedObject);
    __ move_long_ext(RobjectFields, as_Register(Rzero->encoding() - 1), 0);

    // Initialize object header only.
    __ bind(initialize_header);
    if (UseBiasedLocking) {
      Register prototype = RobjectFields;
      __ z_lg(prototype, Address(iklass, Klass::prototype_header_offset()));
      __ z_stg(prototype, Address(RallocatedObject, oopDesc::mark_offset_in_bytes()));
    } else {
      __ store_const(Address(RallocatedObject, oopDesc::mark_offset_in_bytes()),
                     (long)markOopDesc::prototype());
    }

    __ store_klass_gap(Rzero, RallocatedObject); // Zero klass gap for compressed oops.
    __ store_klass(iklass, RallocatedObject);    // Store klass last.

    {
      SkipIfEqual skip(_masm, &DTraceAllocProbes, false, Z_ARG5 /*scratch*/);
      // Trigger dtrace event for fastpath.
      __ push(atos); // Save the return value.
      __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_object_alloc), RallocatedObject);
      __ pop(atos);  // Restore the return value.
    }
    __ z_bru(done);
  }

  // slow case
  __ bind(slow_case);
  __ get_constant_pool(Z_ARG2);
  __ get_2_byte_integer_at_bcp(Z_ARG3/*dest*/, 1, InterpreterMacroAssembler::Unsigned);
  call_VM(Z_tos, CAST_FROM_FN_PTR(address, InterpreterRuntime::_new), Z_ARG2, Z_ARG3);
  __ verify_oop(Z_tos);

  // continue
  __ bind(done);

  BLOCK_COMMENT("} TemplateTable::_new");
}

void TemplateTable::newarray() {
  transition(itos, atos);

  // Call runtime.
  __ z_llgc(Z_ARG2, at_bcp(1)); // type
  __ z_lgfr(Z_ARG3, Z_tos);     // size
  call_VM(Z_RET,
          CAST_FROM_FN_PTR(address, InterpreterRuntime::newarray),
          Z_ARG2, Z_ARG3);
}

void TemplateTable::anewarray() {
  transition(itos, atos);
  __ get_2_byte_integer_at_bcp(Z_ARG3, 1, InterpreterMacroAssembler::Unsigned);
  __ get_constant_pool(Z_ARG2);
  __ z_lgfr(Z_ARG4, Z_tos);
  call_VM(Z_tos, CAST_FROM_FN_PTR(address, InterpreterRuntime::anewarray),
          Z_ARG2, Z_ARG3, Z_ARG4);
}

void TemplateTable::arraylength() {
  transition(atos, itos);

  int offset = arrayOopDesc::length_offset_in_bytes();

  __ null_check(Z_tos, Z_R0_scratch, offset);
  __ mem2reg_opt(Z_tos, Address(Z_tos, offset), false);
}
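// Resolution note for the checkcast/instanceof templates below
// (informational): the constant pool tag is JVM_CONSTANT_Class only once the
// class entry has been resolved ("quicked"). If it isn't, we must call into
// the runtime (quicken_io_cc), which can safepoint; that is why the receiver
// is saved on the expression stack across the call.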
void TemplateTable::checkcast() {
  transition(atos, atos);

  NearLabel done, is_null, ok_is_subtype, quicked, resolved;

  BLOCK_COMMENT("checkcast {");
  // If object is NULL, we are almost done.
  __ compareU64_and_branch(Z_tos, (intptr_t) 0, Assembler::bcondZero, is_null);

  // Get cpool & tags index.
  Register cpool = Z_tmp_1;
  Register tags = Z_tmp_2;
  Register index = Z_ARG5;

  __ get_cpool_and_tags(cpool, tags);
  __ get_2_byte_integer_at_bcp(index, 1, InterpreterMacroAssembler::Unsigned);
  // See if bytecode has already been quicked.
  // Note: For CLI, we would have to add the index to the tags pointer first,
  // thus load and compare in a "classic" manner.
  __ z_llgc(Z_R0_scratch,
            Address(tags, index, Array<u1>::base_offset_in_bytes()));
  __ compareU64_and_branch(Z_R0_scratch, JVM_CONSTANT_Class,
                           Assembler::bcondEqual, quicked);

  __ push(atos); // Save receiver for result, and for GC.
  call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::quicken_io_cc));
  __ get_vm_result_2(Z_tos);

  Register receiver = Z_ARG4;
  Register klass = Z_tos;
  Register subklass = Z_ARG5;

  __ pop_ptr(receiver); // Restore receiver.
  __ z_bru(resolved);

  // Get superklass in klass and subklass in subklass.
  __ bind(quicked);

  __ z_lgr(Z_ARG4, Z_tos);                  // Save receiver.
  __ z_sllg(index, index, LogBytesPerWord); // index2bytes for addressing
  __ load_resolved_klass_at_offset(cpool, index, klass);

  __ bind(resolved);

  __ load_klass(subklass, receiver);

  // Generate subtype check. Object in receiver.
  // Superklass in klass. Subklass in subklass.
  __ gen_subtype_check(subklass, klass, Z_ARG3, Z_tmp_1, ok_is_subtype);

  // Come here on failure.
  __ push_ptr(receiver);
  // The object is at TOS, where the exception handler expects it.
  __ z_brul((address) Interpreter::_throw_ClassCastException_entry);

  // Come here on success.
  __ bind(ok_is_subtype);

  __ z_lgr(Z_tos, receiver); // Restore object.

  // Collect counts on whether this test sees NULLs a lot or not.
  if (ProfileInterpreter) {
    __ z_bru(done);
    __ bind(is_null);
    __ profile_null_seen(Z_tmp_1);
  } else {
    __ bind(is_null); // Same as 'done'.
  }

  __ bind(done);
  BLOCK_COMMENT("} checkcast");
}
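// Why the NULL paths of checkcast and instanceof differ (informational):
//   (T) null          succeeds and leaves null on tos -> checkcast simply
//                     falls through at is_null;
//   null instanceof T is always false -> instanceof can fall through as
//                     well, because a null Z_tos already equals the result 0.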
void TemplateTable::instanceof() {
  transition(atos, itos);

  NearLabel done, is_null, ok_is_subtype, quicked, resolved;

  BLOCK_COMMENT("instanceof {");
  // If object is NULL, we are almost done.
  __ compareU64_and_branch(Z_tos, (intptr_t) 0, Assembler::bcondZero, is_null);

  // Get cpool & tags index.
  Register cpool = Z_tmp_1;
  Register tags = Z_tmp_2;
  Register index = Z_ARG5;

  __ get_cpool_and_tags(cpool, tags);
  __ get_2_byte_integer_at_bcp(index, 1, InterpreterMacroAssembler::Unsigned);
  // See if bytecode has already been quicked.
  // Note: For CLI, we would have to add the index to the tags pointer first,
  // thus load and compare in a "classic" manner.
  __ z_llgc(Z_R0_scratch,
            Address(tags, index, Array<u1>::base_offset_in_bytes()));
  __ compareU64_and_branch(Z_R0_scratch, JVM_CONSTANT_Class, Assembler::bcondEqual, quicked);

  __ push(atos); // Save receiver for result, and for GC.
  call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::quicken_io_cc));
  __ get_vm_result_2(Z_tos);

  Register receiver = Z_tmp_2;
  Register klass = Z_tos;
  Register subklass = Z_tmp_2;

  __ pop_ptr(receiver); // Restore receiver.
  __ verify_oop(receiver);
  __ load_klass(subklass, subklass);
  __ z_bru(resolved);

  // Get superklass in klass and subklass in subklass.
  __ bind(quicked);

  __ load_klass(subklass, Z_tos);
  __ z_sllg(index, index, LogBytesPerWord); // index2bytes for addressing
  __ load_resolved_klass_at_offset(cpool, index, klass);

  __ bind(resolved);

  // Generate subtype check.
  // Superklass in klass. Subklass in subklass.
  __ gen_subtype_check(subklass, klass, Z_ARG4, Z_ARG5, ok_is_subtype);

  // Come here on failure.
  __ clear_reg(Z_tos, true, false);
  __ z_bru(done);

  // Come here on success.
  __ bind(ok_is_subtype);
  __ load_const_optimized(Z_tos, 1);

  // Collect counts on whether this test sees NULLs a lot or not.
  if (ProfileInterpreter) {
    __ z_bru(done);
    __ bind(is_null);
    __ profile_null_seen(Z_tmp_1);
  } else {
    __ bind(is_null); // Same as 'done'.
  }

  __ bind(done);
  // tos = 0: obj == NULL or obj is not an instance of the specified klass
  // tos = 1: obj != NULL and obj is an instance of the specified klass
  BLOCK_COMMENT("} instanceof");
}

//-----------------------------------------------------------------------------
// Breakpoints
void TemplateTable::_breakpoint() {

  // Note: We get here even if we are single stepping.
  // Jbug insists on setting breakpoints at every bytecode
  // even if we are in single step mode.

  transition(vtos, vtos);

  // Get the unpatched byte code.
  __ get_method(Z_ARG2);
  __ call_VM(noreg,
             CAST_FROM_FN_PTR(address, InterpreterRuntime::get_original_bytecode_at),
             Z_ARG2, Z_bcp);
  // Save the result to a register that is preserved over C-function calls.
  __ z_lgr(Z_tmp_1, Z_RET);

  // Post the breakpoint event.
  __ get_method(Z_ARG2);
  __ call_VM(noreg,
             CAST_FROM_FN_PTR(address, InterpreterRuntime::_breakpoint),
             Z_ARG2, Z_bcp);

  // Must restore the bytecode, because call_VM destroys Z_bytecode.
  __ z_lgr(Z_bytecode, Z_tmp_1);

  // Complete the execution of original bytecode.
  __ dispatch_only_normal(vtos);
}


// Exceptions

void TemplateTable::athrow() {
  transition(atos, vtos);
  __ null_check(Z_tos);
  __ load_absolute_address(Z_ARG2, Interpreter::throw_exception_entry());
  __ z_br(Z_ARG2);
}

// Synchronization
//
// Note: monitorenter & exit are symmetric routines, which is reflected
// in the assembly code structure as well.
//
// Stack layout:
//
//            callers_sp        <- Z_SP (callers_sp == Z_fp (own fp))
//            return_pc
//            [rest of ABI_160]
//           /slot o:   free
//          / ...       free
//    oper. |slot n+1:  free    <- Z_esp points to first free slot
//    stack |slot n:    val     caches IJAVA_STATE.esp
//          |...
//           \slot 0:   val
//           /slot m            <- IJAVA_STATE.monitors = monitor block top
//          | ...
//  monitors|slot 2
//          |slot 1
//           \slot 0
//           /slot l            <- monitor block bot
// ijava_state | ...
//          |slot 2
//           \slot 0
//                              <- Z_fp
void TemplateTable::monitorenter() {
  transition(atos, vtos);

  BLOCK_COMMENT("monitorenter {");

  // Check for NULL object.
  __ null_check(Z_tos);
  const int entry_size = frame::interpreter_frame_monitor_size() * wordSize;
  NearLabel allocated;
  // Initialize entry pointer.
  const Register Rfree_slot = Z_tmp_1;
  __ clear_reg(Rfree_slot, true, false); // Points to free slot or NULL. Don't set CC.
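  // Sketch of the slot search below (pseudocode, illustrative only):
  //
  //   free = NULL;
  //   for (cur = monitor_block_top; cur != bot; cur += entry_size) {
  //     if (cur->obj == NULL) free = cur;  // remember a free slot
  //     if (cur->obj == tos)  break;       // stop at an existing monitor for
  //   }                                    // the same object, so a recursive
  //                                        // lock always ends up above it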
  // Find a free slot in the monitor block from top to bot (result in Rfree_slot).
  {
    const Register Rcurr_monitor = Z_ARG2;
    const Register Rbot = Z_ARG3; // Points to word under bottom of monitor block.
    const Register Rlocked_obj = Z_ARG4;
    NearLabel loop, exit, not_free;
    // Starting with top-most entry.
    __ get_monitors(Rcurr_monitor); // Rcurr_monitor = IJAVA_STATE.monitors
    __ add2reg(Rbot, -frame::z_ijava_state_size, Z_fp);

#ifdef ASSERT
    address reentry = NULL;
    { NearLabel ok;
      __ compareU64_and_branch(Rcurr_monitor, Rbot, Assembler::bcondNotHigh, ok);
      reentry = __ stop_chain_static(reentry, "IJAVA_STATE.monitors points below monitor block bottom");
      __ bind(ok);
    }
    { NearLabel ok;
      __ compareU64_and_branch(Rcurr_monitor, Z_esp, Assembler::bcondHigh, ok);
      reentry = __ stop_chain_static(reentry, "IJAVA_STATE.monitors above Z_esp");
      __ bind(ok);
    }
#endif

    // Check if bottom reached, i.e. if there is at least one monitor.
    __ compareU64_and_branch(Rcurr_monitor, Rbot, Assembler::bcondEqual, exit);

    __ bind(loop);
    // Check if current entry is used.
    __ load_and_test_long(Rlocked_obj, Address(Rcurr_monitor, BasicObjectLock::obj_offset_in_bytes()));
    __ z_brne(not_free);
    // If not used then remember entry in Rfree_slot.
    __ z_lgr(Rfree_slot, Rcurr_monitor);
    __ bind(not_free);
    // Exit if current entry is for same object; this guarantees that the new
    // monitor used for a recursive lock is above the older one.
    __ compareU64_and_branch(Rlocked_obj, Z_tos, Assembler::bcondEqual, exit);
    // Otherwise advance to next entry.
    __ add2reg(Rcurr_monitor, entry_size);
    // Check if bottom reached, if not at bottom then check this entry.
    __ compareU64_and_branch(Rcurr_monitor, Rbot, Assembler::bcondNotEqual, loop);
    __ bind(exit);
  }

  // Rfree_slot != NULL -> found one
  __ compareU64_and_branch(Rfree_slot, (intptr_t)0L, Assembler::bcondNotEqual, allocated);

  // Allocate one if there's no free slot.
  __ add_monitor_to_stack(false, Z_ARG3, Z_ARG4, Z_ARG5);
  __ get_monitors(Rfree_slot);

  // Rfree_slot: points to monitor entry.
  __ bind(allocated);

  // Increment bcp to point to the next bytecode, so exception
  // handling for async. exceptions works correctly.
  // The object has already been popped from the stack, so the
  // expression stack looks correct.
  __ add2reg(Z_bcp, 1, Z_bcp);

  // Store object.
  __ z_stg(Z_tos, BasicObjectLock::obj_offset_in_bytes(), Rfree_slot);
  __ lock_object(Rfree_slot, Z_tos);

  // Check to make sure this monitor doesn't cause stack overflow after locking.
  __ save_bcp(); // in case of exception
  __ generate_stack_overflow_check(0);

  // The bcp has already been incremented. Just need to dispatch to
  // next instruction.
  __ dispatch_next(vtos);

  BLOCK_COMMENT("} monitorenter");
}
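// Note on the not_found path below (informational): javac always emits
// balanced monitorenter/monitorexit pairs, but hand-crafted bytecode may
// unlock an object that this frame never locked. In that case the slot
// search fails and an IllegalMonitorStateException is raised (unlocking was
// not block-structured).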
void TemplateTable::monitorexit() {
  transition(atos, vtos);

  BLOCK_COMMENT("monitorexit {");

  // Check for NULL object.
  __ null_check(Z_tos);

  NearLabel found, not_found;
  const Register Rcurr_monitor = Z_ARG2;

  // Find matching slot.
  {
    const int entry_size = frame::interpreter_frame_monitor_size() * wordSize;
    NearLabel entry, loop;

    const Register Rbot = Z_ARG3; // Points to word under bottom of monitor block.
    const Register Rlocked_obj = Z_ARG4;
    // Starting with top-most entry.
    __ get_monitors(Rcurr_monitor); // Rcurr_monitor = IJAVA_STATE.monitors
    __ add2reg(Rbot, -frame::z_ijava_state_size, Z_fp);

#ifdef ASSERT
    address reentry = NULL;
    { NearLabel ok;
      __ compareU64_and_branch(Rcurr_monitor, Rbot, Assembler::bcondNotHigh, ok);
      reentry = __ stop_chain_static(reentry, "IJAVA_STATE.monitors points below monitor block bottom");
      __ bind(ok);
    }
    { NearLabel ok;
      __ compareU64_and_branch(Rcurr_monitor, Z_esp, Assembler::bcondHigh, ok);
      reentry = __ stop_chain_static(reentry, "IJAVA_STATE.monitors above Z_esp");
      __ bind(ok);
    }
#endif

    // Check if bottom reached, i.e. if there is at least one monitor.
    __ compareU64_and_branch(Rcurr_monitor, Rbot, Assembler::bcondEqual, not_found);

    __ bind(loop);
    // Check if current entry is for same object.
    __ z_lg(Rlocked_obj, Address(Rcurr_monitor, BasicObjectLock::obj_offset_in_bytes()));
    // If same object then stop searching.
    __ compareU64_and_branch(Rlocked_obj, Z_tos, Assembler::bcondEqual, found);
    // Otherwise advance to next entry.
    __ add2reg(Rcurr_monitor, entry_size);
    // Check if bottom reached, if not at bottom then check this entry.
    __ compareU64_and_branch(Rcurr_monitor, Rbot, Assembler::bcondNotEqual, loop);
  }

  __ bind(not_found);
  // Error handling. Unlocking was not block-structured.
  __ call_VM(noreg, CAST_FROM_FN_PTR(address,
                                     InterpreterRuntime::throw_illegal_monitor_state_exception));
  __ should_not_reach_here();

  __ bind(found);
  __ push_ptr(Z_tos); // Make sure object is on stack (contract with oopMaps).
  __ unlock_object(Rcurr_monitor, Z_tos);
  __ pop_ptr(Z_tos);  // Discard object.
  BLOCK_COMMENT("} monitorexit");
}

// Wide instructions
void TemplateTable::wide() {
  transition(vtos, vtos);

  __ z_llgc(Z_R1_scratch, at_bcp(1));
  __ z_sllg(Z_R1_scratch, Z_R1_scratch, LogBytesPerWord);
  __ load_absolute_address(Z_tmp_1, (address)Interpreter::_wentry_point);
  __ mem2reg_opt(Z_tmp_1, Address(Z_tmp_1, Z_R1_scratch));
  __ z_br(Z_tmp_1);
  // Note: the bcp increment step is part of the individual wide
  // bytecode implementations.
}

// Multi arrays
void TemplateTable::multianewarray() {
  transition(vtos, atos);

  __ z_llgc(Z_tmp_1, at_bcp(3)); // Get number of dimensions.
  // Slot count to byte offset.
  __ z_sllg(Z_tmp_1, Z_tmp_1, Interpreter::logStackElementSize);
  // Z_esp points past last_dim, so set Z_ARG2 to first_dim address.
  __ load_address(Z_ARG2, Address(Z_esp, Z_tmp_1));
  call_VM(Z_RET,
          CAST_FROM_FN_PTR(address, InterpreterRuntime::multianewarray),
          Z_ARG2);
  // Pop dimensions from expression stack.
  __ z_agr(Z_esp, Z_tmp_1);
}
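// Worked example for multianewarray (illustrative): for 'new int[2][3]' the
// compiler pushes the two dimension values, so with ndims == 2 the code above
// computes Z_ARG2 = Z_esp + 2 * Interpreter::stackElementSize, the address of
// the first (outermost) dimension; the final z_agr pops both dimension slots
// after the runtime call returns the array in Z_RET.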