/*
 * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
 * Copyright 2013, 2014 SAP AG. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/interpreterRuntime.hpp"
#include "interpreter/templateInterpreter.hpp"
#include "interpreter/templateTable.hpp"
#include "memory/universe.inline.hpp"
#include "oops/objArrayKlass.hpp"
#include "oops/oop.inline.hpp"
#include "prims/methodHandles.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/synchronizer.hpp"
#include "utilities/macros.hpp"

#ifndef CC_INTERP

#undef __
#define __ _masm->

// ============================================================================
// Misc helpers

// Do an oop store like *(base + index) = val OR *(base + offset) = val
// (only one of both variants is possible at the same time).
// Index can be noreg.
// Kills:
//   Rbase, Rtmp
static void do_oop_store(InterpreterMacroAssembler* _masm,
                         Register           Rbase,
                         RegisterOrConstant offset,
                         Register           Rval,   // Noreg means always null.
                         Register           Rtmp1,
                         Register           Rtmp2,
                         Register           Rtmp3,
                         BarrierSet::Name   barrier,
                         bool               precise,
                         bool               check_null) {
  assert_different_registers(Rtmp1, Rtmp2, Rtmp3, Rval, Rbase);

  switch (barrier) {
#if INCLUDE_ALL_GCS
    case BarrierSet::G1SATBCT:
    case BarrierSet::G1SATBCTLogging:
      {
        // Load and record the previous value.
        __ g1_write_barrier_pre(Rbase, offset,
                                Rtmp3, /* holder of pre_val ? */
                                Rtmp1, Rtmp2, false /* frame */);

        Label Lnull, Ldone;
        if (Rval != noreg) {
          if (check_null) {
            __ cmpdi(CCR0, Rval, 0);
            __ beq(CCR0, Lnull);
          }
          __ store_heap_oop_not_null(Rval, offset, Rbase, /*Rval must stay uncompressed.*/ Rtmp1);
          // Mark the card.
          if (!(offset.is_constant() && offset.as_constant() == 0) && precise) {
            __ add(Rbase, offset, Rbase);
          }
          __ g1_write_barrier_post(Rbase, Rval, Rtmp1, Rtmp2, Rtmp3, /*filtered (fast path)*/ &Ldone);
          if (check_null) { __ b(Ldone); }
        }

        if (Rval == noreg || check_null) { // Store null oop.
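          // A null store cannot create a cross-region reference, so no post
          // barrier / card mark is needed here; the SATB pre-barrier above has
          // already recorded the previous value.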
          Register Rnull = Rval;
          __ bind(Lnull);
          if (Rval == noreg) {
            Rnull = Rtmp1;
            __ li(Rnull, 0);
          }
          if (UseCompressedOops) {
            __ stw(Rnull, offset, Rbase);
          } else {
            __ std(Rnull, offset, Rbase);
          }
        }
        __ bind(Ldone);
      }
      break;
#endif // INCLUDE_ALL_GCS
    case BarrierSet::CardTableModRef:
    case BarrierSet::CardTableExtension:
      {
        Label Lnull, Ldone;
        if (Rval != noreg) {
          if (check_null) {
            __ cmpdi(CCR0, Rval, 0);
            __ beq(CCR0, Lnull);
          }
          __ store_heap_oop_not_null(Rval, offset, Rbase, /*Rval should better stay uncompressed.*/ Rtmp1);
          // Mark the card.
          if (!(offset.is_constant() && offset.as_constant() == 0) && precise) {
            __ add(Rbase, offset, Rbase);
          }
          __ card_write_barrier_post(Rbase, Rval, Rtmp1);
          if (check_null) {
            __ b(Ldone);
          }
        }

        if (Rval == noreg || check_null) { // Store null oop.
          Register Rnull = Rval;
          __ bind(Lnull);
          if (Rval == noreg) {
            Rnull = Rtmp1;
            __ li(Rnull, 0);
          }
          if (UseCompressedOops) {
            __ stw(Rnull, offset, Rbase);
          } else {
            __ std(Rnull, offset, Rbase);
          }
        }
        __ bind(Ldone);
      }
      break;
    case BarrierSet::ModRef:
    case BarrierSet::Other:
      ShouldNotReachHere();
      break;
    default:
      ShouldNotReachHere();
  }
}

// ============================================================================
// Platform-dependent initialization

void TemplateTable::pd_initialize() {
  // No ppc64 specific initialization.
}

Address TemplateTable::at_bcp(int offset) {
  // Not used on ppc.
  ShouldNotReachHere();
  return Address();
}

// Patches the current bytecode (ptr to it located in bcp)
// in the bytecode stream with a new one.
void TemplateTable::patch_bytecode(Bytecodes::Code new_bc, Register Rnew_bc, Register Rtemp, bool load_bc_into_bc_reg /*=true*/, int byte_no) {
  // With sharing on, may need to test method flag.
  if (!RewriteBytecodes) return;
  Label L_patch_done;

  switch (new_bc) {
    case Bytecodes::_fast_aputfield:
    case Bytecodes::_fast_bputfield:
    case Bytecodes::_fast_zputfield:
    case Bytecodes::_fast_cputfield:
    case Bytecodes::_fast_dputfield:
    case Bytecodes::_fast_fputfield:
    case Bytecodes::_fast_iputfield:
    case Bytecodes::_fast_lputfield:
    case Bytecodes::_fast_sputfield:
    {
      // We skip bytecode quickening for putfield instructions when
      // the put_code written to the constant pool cache is zero.
      // This is required so that every execution of this instruction
      // calls out to InterpreterRuntime::resolve_get_put to do
      // additional, required work.
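      // (The put_code byte becomes non-zero only once the field is resolved;
      // the zero check below skips patching until then.)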
      assert(byte_no == f1_byte || byte_no == f2_byte, "byte_no out of range");
      assert(load_bc_into_bc_reg, "we use bc_reg as temp");
      __ get_cache_and_index_at_bcp(Rtemp /* dst = cache */, 1);
      // ((*(cache+indices))>>((1+byte_no)*8))&0xFF:
#if defined(VM_LITTLE_ENDIAN)
      __ lbz(Rnew_bc, in_bytes(ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::indices_offset()) + 1 + byte_no, Rtemp);
#else
      __ lbz(Rnew_bc, in_bytes(ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::indices_offset()) + 7 - (1 + byte_no), Rtemp);
#endif
      __ cmpwi(CCR0, Rnew_bc, 0);
      __ li(Rnew_bc, (unsigned int)(unsigned char)new_bc);
      __ beq(CCR0, L_patch_done);
      // __ isync(); // acquire not needed
      break;
    }

    default:
      assert(byte_no == -1, "sanity");
      if (load_bc_into_bc_reg) {
        __ li(Rnew_bc, (unsigned int)(unsigned char)new_bc);
      }
  }

  if (JvmtiExport::can_post_breakpoint()) {
    Label L_fast_patch;
    __ lbz(Rtemp, 0, R14_bcp);
    __ cmpwi(CCR0, Rtemp, (unsigned int)(unsigned char)Bytecodes::_breakpoint);
    __ bne(CCR0, L_fast_patch);
    // Perform the quickening, slowly, in the bowels of the breakpoint table.
    __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::set_original_bytecode_at), R19_method, R14_bcp, Rnew_bc);
    __ b(L_patch_done);
    __ bind(L_fast_patch);
  }

  // Patch bytecode.
  __ stb(Rnew_bc, 0, R14_bcp);

  __ bind(L_patch_done);
}

// ============================================================================
// Individual instructions

void TemplateTable::nop() {
  transition(vtos, vtos);
  // Nothing to do.
}

void TemplateTable::shouldnotreachhere() {
  transition(vtos, vtos);
  __ stop("shouldnotreachhere bytecode");
}

void TemplateTable::aconst_null() {
  transition(vtos, atos);
  __ li(R17_tos, 0);
}

void TemplateTable::iconst(int value) {
  transition(vtos, itos);
  assert(value >= -1 && value <= 5, "");
  __ li(R17_tos, value);
}

void TemplateTable::lconst(int value) {
  transition(vtos, ltos);
  assert(value >= -1 && value <= 5, "");
  __ li(R17_tos, value);
}

void TemplateTable::fconst(int value) {
  transition(vtos, ftos);
  static float zero = 0.0;
  static float one  = 1.0;
  static float two  = 2.0;
  switch (value) {
    default: ShouldNotReachHere();
    case 0: {
      int simm16_offset = __ load_const_optimized(R11_scratch1, (address*)&zero, R0, true);
      __ lfs(F15_ftos, simm16_offset, R11_scratch1);
      break;
    }
    case 1: {
      int simm16_offset = __ load_const_optimized(R11_scratch1, (address*)&one, R0, true);
      __ lfs(F15_ftos, simm16_offset, R11_scratch1);
      break;
    }
    case 2: {
      int simm16_offset = __ load_const_optimized(R11_scratch1, (address*)&two, R0, true);
      __ lfs(F15_ftos, simm16_offset, R11_scratch1);
      break;
    }
  }
}

void TemplateTable::dconst(int value) {
  transition(vtos, dtos);
  static double zero = 0.0;
  static double one  = 1.0;
  switch (value) {
    case 0: {
      int simm16_offset = __ load_const_optimized(R11_scratch1, (address*)&zero, R0, true);
      __ lfd(F15_ftos, simm16_offset, R11_scratch1);
      break;
    }
    case 1: {
      int simm16_offset = __ load_const_optimized(R11_scratch1, (address*)&one, R0, true);
      __ lfd(F15_ftos, simm16_offset, R11_scratch1);
      break;
    }
    default: ShouldNotReachHere();
  }
}

void TemplateTable::bipush() {
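  // Push a byte-sized immediate operand, sign-extended to int.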
  transition(vtos, itos);
  __ lbz(R17_tos, 1, R14_bcp);
  __ extsb(R17_tos, R17_tos);
}

void TemplateTable::sipush() {
  transition(vtos, itos);
  __ get_2_byte_integer_at_bcp(1, R17_tos, InterpreterMacroAssembler::Signed);
}

void TemplateTable::ldc(bool wide) {
  Register Rscratch1 = R11_scratch1,
           Rscratch2 = R12_scratch2,
           Rcpool    = R3_ARG1;

  transition(vtos, vtos);
  Label notInt, notClass, exit;

  __ get_cpool_and_tags(Rcpool, Rscratch2); // Set Rscratch2 = &tags.
  if (wide) { // Read index.
    __ get_2_byte_integer_at_bcp(1, Rscratch1, InterpreterMacroAssembler::Unsigned);
  } else {
    __ lbz(Rscratch1, 1, R14_bcp);
  }

  const int base_offset = ConstantPool::header_size() * wordSize;
  const int tags_offset = Array<u1>::base_offset_in_bytes();

  // Get type from tags.
  __ addi(Rscratch2, Rscratch2, tags_offset);
  __ lbzx(Rscratch2, Rscratch2, Rscratch1);

  __ cmpwi(CCR0, Rscratch2, JVM_CONSTANT_UnresolvedClass); // Unresolved class?
  __ cmpwi(CCR1, Rscratch2, JVM_CONSTANT_UnresolvedClassInError); // Unresolved class in error state?
  __ cror(/*CR0 eq*/2, /*CR1 eq*/4+2, /*CR0 eq*/2);

  // Resolved class - need to call vm to get java mirror of the class.
  __ cmpwi(CCR1, Rscratch2, JVM_CONSTANT_Class);
  __ crnor(/*CR0 eq*/2, /*CR1 eq*/4+2, /*CR0 eq*/2); // Neither resolved class nor unresolved case from above?
  __ beq(CCR0, notClass);

  __ li(R4, wide ? 1 : 0);
  call_VM(R17_tos, CAST_FROM_FN_PTR(address, InterpreterRuntime::ldc), R4);
  __ push(atos);
  __ b(exit);

  __ align(32, 12);
  __ bind(notClass);
  __ addi(Rcpool, Rcpool, base_offset);
  __ sldi(Rscratch1, Rscratch1, LogBytesPerWord);
  __ cmpdi(CCR0, Rscratch2, JVM_CONSTANT_Integer);
  __ bne(CCR0, notInt);
  __ lwax(R17_tos, Rcpool, Rscratch1);
  __ push(itos);
  __ b(exit);

  __ align(32, 12);
  __ bind(notInt);
#ifdef ASSERT
  // String and Object are rewritten to fast_aldc
  __ cmpdi(CCR0, Rscratch2, JVM_CONSTANT_Float);
  __ asm_assert_eq("unexpected type", 0x8765);
#endif
  __ lfsx(F15_ftos, Rcpool, Rscratch1);
  __ push(ftos);

  __ align(32, 12);
  __ bind(exit);
}

// Fast path for caching oop constants.
void TemplateTable::fast_aldc(bool wide) {
  transition(vtos, atos);

  int index_size = wide ? sizeof(u2) : sizeof(u1);
  const Register Rscratch = R11_scratch1;
  Label resolved;

  // We are resolved if the resolved reference cache entry contains a
  // non-null object (CallSite, etc.)
  __ get_cache_index_at_bcp(Rscratch, 1, index_size); // Load index.
  __ load_resolved_reference_at_index(R17_tos, Rscratch);
  __ cmpdi(CCR0, R17_tos, 0);
  __ bne(CCR0, resolved);
  __ load_const_optimized(R3_ARG1, (int)bytecode());

  address entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_ldc);

  // First time invocation - must resolve first.
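  // The VM call leaves the resolved constant in R17_tos (atos).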
  __ call_VM(R17_tos, entry, R3_ARG1);

  __ align(32, 12);
  __ bind(resolved);
  __ verify_oop(R17_tos);
}

void TemplateTable::ldc2_w() {
  transition(vtos, vtos);
  Label Llong, Lexit;

  Register Rindex = R11_scratch1,
           Rcpool = R12_scratch2,
           Rtag   = R3_ARG1;
  __ get_cpool_and_tags(Rcpool, Rtag);
  __ get_2_byte_integer_at_bcp(1, Rindex, InterpreterMacroAssembler::Unsigned);

  const int base_offset = ConstantPool::header_size() * wordSize;
  const int tags_offset = Array<u1>::base_offset_in_bytes();
  // Get type from tags.
  __ addi(Rcpool, Rcpool, base_offset);
  __ addi(Rtag, Rtag, tags_offset);

  __ lbzx(Rtag, Rtag, Rindex);

  __ sldi(Rindex, Rindex, LogBytesPerWord);
  __ cmpdi(CCR0, Rtag, JVM_CONSTANT_Double);
  __ bne(CCR0, Llong);
  // A double can be placed at word-aligned locations in the constant pool.
  // Check out Conversions.java for an example.
  // Also ConstantPool::header_size() is 20, which makes it very difficult
  // to double-align double on the constant pool. SG, 11/7/97
  __ lfdx(F15_ftos, Rcpool, Rindex);
  __ push(dtos);
  __ b(Lexit);

  __ bind(Llong);
  __ ldx(R17_tos, Rcpool, Rindex);
  __ push(ltos);

  __ bind(Lexit);
}

// Get the locals index located in the bytecode stream at bcp + offset.
void TemplateTable::locals_index(Register Rdst, int offset) {
  __ lbz(Rdst, offset, R14_bcp);
}

void TemplateTable::iload() {
  transition(vtos, itos);

  // Get the local value into tos
  const Register Rindex = R22_tmp2;
  locals_index(Rindex);

  // Rewrite iload,iload pair into fast_iload2
  //         iload,caload pair into fast_icaload
  if (RewriteFrequentPairs) {
    Label Lrewrite, Ldone;
    Register Rnext_byte  = R3_ARG1,
             Rrewrite_to = R6_ARG4,
             Rscratch    = R11_scratch1;

    // get next byte
    __ lbz(Rnext_byte, Bytecodes::length_for(Bytecodes::_iload), R14_bcp);

    // if _iload, wait to rewrite to iload2. We only want to rewrite the
    // last two iloads in a pair. Comparing against fast_iload means that
    // the next bytecode is neither an iload nor a caload, and therefore
    // an iload pair.
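    // I.e.: iload,iload  => rewrite the second iload to fast_iload2;
    //       iload,caload => rewrite to fast_icaload;
    //       otherwise    => rewrite to fast_iload.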
    __ cmpwi(CCR0, Rnext_byte, (unsigned int)(unsigned char)Bytecodes::_iload);
    __ beq(CCR0, Ldone);

    __ cmpwi(CCR1, Rnext_byte, (unsigned int)(unsigned char)Bytecodes::_fast_iload);
    __ li(Rrewrite_to, (unsigned int)(unsigned char)Bytecodes::_fast_iload2);
    __ beq(CCR1, Lrewrite);

    __ cmpwi(CCR0, Rnext_byte, (unsigned int)(unsigned char)Bytecodes::_caload);
    __ li(Rrewrite_to, (unsigned int)(unsigned char)Bytecodes::_fast_icaload);
    __ beq(CCR0, Lrewrite);

    __ li(Rrewrite_to, (unsigned int)(unsigned char)Bytecodes::_fast_iload);

    __ bind(Lrewrite);
    patch_bytecode(Bytecodes::_iload, Rrewrite_to, Rscratch, false);
    __ bind(Ldone);
  }

  __ load_local_int(R17_tos, Rindex, Rindex);
}

// Load 2 integers in a row without dispatching
void TemplateTable::fast_iload2() {
  transition(vtos, itos);

  __ lbz(R3_ARG1, 1, R14_bcp);
  __ lbz(R17_tos, Bytecodes::length_for(Bytecodes::_iload) + 1, R14_bcp);

  __ load_local_int(R3_ARG1, R11_scratch1, R3_ARG1);
  __ load_local_int(R17_tos, R12_scratch2, R17_tos);
  __ push_i(R3_ARG1);
}

void TemplateTable::fast_iload() {
  transition(vtos, itos);
  // Get the local value into tos

  const Register Rindex = R11_scratch1;
  locals_index(Rindex);
  __ load_local_int(R17_tos, Rindex, Rindex);
}

// Load a local variable type long from locals area to TOS cache register.
// Local index resides in bytecodestream.
void TemplateTable::lload() {
  transition(vtos, ltos);

  const Register Rindex = R11_scratch1;
  locals_index(Rindex);
  __ load_local_long(R17_tos, Rindex, Rindex);
}

void TemplateTable::fload() {
  transition(vtos, ftos);

  const Register Rindex = R11_scratch1;
  locals_index(Rindex);
  __ load_local_float(F15_ftos, Rindex, Rindex);
}

void TemplateTable::dload() {
  transition(vtos, dtos);

  const Register Rindex = R11_scratch1;
  locals_index(Rindex);
  __ load_local_double(F15_ftos, Rindex, Rindex);
}

void TemplateTable::aload() {
  transition(vtos, atos);

  const Register Rindex = R11_scratch1;
  locals_index(Rindex);
  __ load_local_ptr(R17_tos, Rindex, Rindex);
}

void TemplateTable::locals_index_wide(Register Rdst) {
  // Offset is 2, not 1, because Lbcp points to wide prefix code.
  __ get_2_byte_integer_at_bcp(2, Rdst, InterpreterMacroAssembler::Unsigned);
}

void TemplateTable::wide_iload() {
  // Get the local value into tos.

  const Register Rindex = R11_scratch1;
  locals_index_wide(Rindex);
  __ load_local_int(R17_tos, Rindex, Rindex);
}

void TemplateTable::wide_lload() {
  transition(vtos, ltos);

  const Register Rindex = R11_scratch1;
  locals_index_wide(Rindex);
  __ load_local_long(R17_tos, Rindex, Rindex);
}

void TemplateTable::wide_fload() {
  transition(vtos, ftos);

  const Register Rindex = R11_scratch1;
  locals_index_wide(Rindex);
  __ load_local_float(F15_ftos, Rindex, Rindex);
}

void TemplateTable::wide_dload() {
  transition(vtos, dtos);

  const Register Rindex = R11_scratch1;
  locals_index_wide(Rindex);
  __ load_local_double(F15_ftos, Rindex, Rindex);
}

void TemplateTable::wide_aload() {
  transition(vtos, atos);

  const Register Rindex = R11_scratch1;
  locals_index_wide(Rindex);
  __ load_local_ptr(R17_tos, Rindex, Rindex);
}

void TemplateTable::iaload() {
  transition(itos, itos);

  const Register Rload_addr = R3_ARG1,
                 Rarray     = R4_ARG2,
                 Rtemp      = R5_ARG3;
  __ index_check(Rarray, R17_tos /* index */, LogBytesPerInt, Rtemp, Rload_addr);
  __ lwa(R17_tos, arrayOopDesc::base_offset_in_bytes(T_INT), Rload_addr);
}

void TemplateTable::laload() {
  transition(itos, ltos);

  const Register Rload_addr = R3_ARG1,
                 Rarray     = R4_ARG2,
                 Rtemp      = R5_ARG3;
  __ index_check(Rarray, R17_tos /* index */, LogBytesPerLong, Rtemp, Rload_addr);
  __ ld(R17_tos, arrayOopDesc::base_offset_in_bytes(T_LONG), Rload_addr);
}

void TemplateTable::faload() {
  transition(itos, ftos);

  const Register Rload_addr = R3_ARG1,
                 Rarray     = R4_ARG2,
                 Rtemp      = R5_ARG3;
  __ index_check(Rarray, R17_tos /* index */, LogBytesPerInt, Rtemp, Rload_addr);
  __ lfs(F15_ftos, arrayOopDesc::base_offset_in_bytes(T_FLOAT), Rload_addr);
}

void TemplateTable::daload() {
  transition(itos, dtos);

  const Register Rload_addr = R3_ARG1,
                 Rarray     = R4_ARG2,
                 Rtemp      = R5_ARG3;
  __ index_check(Rarray, R17_tos /* index */, LogBytesPerLong, Rtemp, Rload_addr);
  __ lfd(F15_ftos, arrayOopDesc::base_offset_in_bytes(T_DOUBLE), Rload_addr);
}

void TemplateTable::aaload() {
  transition(itos, atos);

  // tos: index
  // result tos: element
  const Register Rload_addr = R3_ARG1,
                 Rarray     = R4_ARG2,
                 Rtemp      = R5_ARG3;
  __ index_check(Rarray, R17_tos /* index */, UseCompressedOops ? 2 : LogBytesPerWord, Rtemp, Rload_addr);
  __ load_heap_oop(R17_tos, arrayOopDesc::base_offset_in_bytes(T_OBJECT), Rload_addr);
  __ verify_oop(R17_tos);
  //__ dcbt(R17_tos); // prefetch
}

void TemplateTable::baload() {
  transition(itos, itos);

  const Register Rload_addr = R3_ARG1,
                 Rarray     = R4_ARG2,
                 Rtemp      = R5_ARG3;
  __ index_check(Rarray, R17_tos /* index */, 0, Rtemp, Rload_addr);
  __ lbz(R17_tos, arrayOopDesc::base_offset_in_bytes(T_BYTE), Rload_addr);
  __ extsb(R17_tos, R17_tos);
}

void TemplateTable::caload() {
  transition(itos, itos);

  const Register Rload_addr = R3_ARG1,
                 Rarray     = R4_ARG2,
                 Rtemp      = R5_ARG3;
  __ index_check(Rarray, R17_tos /* index */, LogBytesPerShort, Rtemp, Rload_addr);
  __ lhz(R17_tos, arrayOopDesc::base_offset_in_bytes(T_CHAR), Rload_addr);
}

// Iload followed by caload frequent pair.
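// The fused template below reads the iload's local index itself, so the
// two bytecodes execute with a single dispatch.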
void TemplateTable::fast_icaload() {
  transition(vtos, itos);

  const Register Rload_addr = R3_ARG1,
                 Rarray     = R4_ARG2,
                 Rtemp      = R11_scratch1;

  locals_index(R17_tos);
  __ load_local_int(R17_tos, Rtemp, R17_tos);
  __ index_check(Rarray, R17_tos /* index */, LogBytesPerShort, Rtemp, Rload_addr);
  __ lhz(R17_tos, arrayOopDesc::base_offset_in_bytes(T_CHAR), Rload_addr);
}

void TemplateTable::saload() {
  transition(itos, itos);

  const Register Rload_addr = R11_scratch1,
                 Rarray     = R12_scratch2,
                 Rtemp      = R3_ARG1;
  __ index_check(Rarray, R17_tos /* index */, LogBytesPerShort, Rtemp, Rload_addr);
  __ lha(R17_tos, arrayOopDesc::base_offset_in_bytes(T_SHORT), Rload_addr);
}

void TemplateTable::iload(int n) {
  transition(vtos, itos);

  __ lwz(R17_tos, Interpreter::local_offset_in_bytes(n), R18_locals);
}

void TemplateTable::lload(int n) {
  transition(vtos, ltos);

  __ ld(R17_tos, Interpreter::local_offset_in_bytes(n + 1), R18_locals);
}

void TemplateTable::fload(int n) {
  transition(vtos, ftos);

  __ lfs(F15_ftos, Interpreter::local_offset_in_bytes(n), R18_locals);
}

void TemplateTable::dload(int n) {
  transition(vtos, dtos);

  __ lfd(F15_ftos, Interpreter::local_offset_in_bytes(n + 1), R18_locals);
}

void TemplateTable::aload(int n) {
  transition(vtos, atos);

  __ ld(R17_tos, Interpreter::local_offset_in_bytes(n), R18_locals);
}

void TemplateTable::aload_0() {
  transition(vtos, atos);
  // According to bytecode histograms, the pairs:
  //
  // _aload_0, _fast_igetfield
  // _aload_0, _fast_agetfield
  // _aload_0, _fast_fgetfield
  //
  // occur frequently. If RewriteFrequentPairs is set, the (slow)
  // _aload_0 bytecode checks if the next bytecode is either
  // _fast_igetfield, _fast_agetfield or _fast_fgetfield and then
  // rewrites the current bytecode into a pair bytecode; otherwise it
  // rewrites the current bytecode into _fast_aload_0 that doesn't do
  // the pair check anymore.
  //
  // Note: If the next bytecode is _getfield, the rewrite must be
  // delayed, otherwise we may miss an opportunity for a pair.
  //
  // Also rewrite frequent pairs
  //   aload_0, aload_1
  //   aload_0, iload_1
  // These bytecodes with a small amount of code are most profitable
  // to rewrite.

  if (RewriteFrequentPairs) {

    Label Lrewrite, Ldont_rewrite;
    Register Rnext_byte  = R3_ARG1,
             Rrewrite_to = R6_ARG4,
             Rscratch    = R11_scratch1;

    // Get next byte.
    __ lbz(Rnext_byte, Bytecodes::length_for(Bytecodes::_aload_0), R14_bcp);

    // If _getfield, wait to rewrite. We only want to rewrite the last two bytecodes in a pair.
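    // I.e.: aload_0,fast_igetfield => fast_iaccess_0; aload_0,fast_agetfield => fast_aaccess_0;
    //       aload_0,fast_fgetfield => fast_faccess_0; otherwise => fast_aload_0.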
    __ cmpwi(CCR0, Rnext_byte, (unsigned int)(unsigned char)Bytecodes::_getfield);
    __ beq(CCR0, Ldont_rewrite);

    __ cmpwi(CCR1, Rnext_byte, (unsigned int)(unsigned char)Bytecodes::_fast_igetfield);
    __ li(Rrewrite_to, (unsigned int)(unsigned char)Bytecodes::_fast_iaccess_0);
    __ beq(CCR1, Lrewrite);

    __ cmpwi(CCR0, Rnext_byte, (unsigned int)(unsigned char)Bytecodes::_fast_agetfield);
    __ li(Rrewrite_to, (unsigned int)(unsigned char)Bytecodes::_fast_aaccess_0);
    __ beq(CCR0, Lrewrite);

    __ cmpwi(CCR1, Rnext_byte, (unsigned int)(unsigned char)Bytecodes::_fast_fgetfield);
    __ li(Rrewrite_to, (unsigned int)(unsigned char)Bytecodes::_fast_faccess_0);
    __ beq(CCR1, Lrewrite);

    __ li(Rrewrite_to, (unsigned int)(unsigned char)Bytecodes::_fast_aload_0);

    __ bind(Lrewrite);
    patch_bytecode(Bytecodes::_aload_0, Rrewrite_to, Rscratch, false);
    __ bind(Ldont_rewrite);
  }

  // Do actual aload_0 (must do this after patch_bytecode which might call VM and GC might change oop).
  aload(0);
}

void TemplateTable::istore() {
  transition(itos, vtos);

  const Register Rindex = R11_scratch1;
  locals_index(Rindex);
  __ store_local_int(R17_tos, Rindex);
}

void TemplateTable::lstore() {
  transition(ltos, vtos);
  const Register Rindex = R11_scratch1;
  locals_index(Rindex);
  __ store_local_long(R17_tos, Rindex);
}

void TemplateTable::fstore() {
  transition(ftos, vtos);

  const Register Rindex = R11_scratch1;
  locals_index(Rindex);
  __ store_local_float(F15_ftos, Rindex);
}

void TemplateTable::dstore() {
  transition(dtos, vtos);

  const Register Rindex = R11_scratch1;
  locals_index(Rindex);
  __ store_local_double(F15_ftos, Rindex);
}

void TemplateTable::astore() {
  transition(vtos, vtos);

  const Register Rindex = R11_scratch1;
  __ pop_ptr();
  __ verify_oop_or_return_address(R17_tos, Rindex);
  locals_index(Rindex);
  __ store_local_ptr(R17_tos, Rindex);
}

void TemplateTable::wide_istore() {
  transition(vtos, vtos);

  const Register Rindex = R11_scratch1;
  __ pop_i();
  locals_index_wide(Rindex);
  __ store_local_int(R17_tos, Rindex);
}

void TemplateTable::wide_lstore() {
  transition(vtos, vtos);

  const Register Rindex = R11_scratch1;
  __ pop_l();
  locals_index_wide(Rindex);
  __ store_local_long(R17_tos, Rindex);
}

void TemplateTable::wide_fstore() {
  transition(vtos, vtos);

  const Register Rindex = R11_scratch1;
  __ pop_f();
  locals_index_wide(Rindex);
  __ store_local_float(F15_ftos, Rindex);
}

void TemplateTable::wide_dstore() {
  transition(vtos, vtos);

  const Register Rindex = R11_scratch1;
  __ pop_d();
  locals_index_wide(Rindex);
  __ store_local_double(F15_ftos, Rindex);
}

void TemplateTable::wide_astore() {
  transition(vtos, vtos);

  const Register Rindex = R11_scratch1;
  __ pop_ptr();
  __ verify_oop_or_return_address(R17_tos, Rindex);
  locals_index_wide(Rindex);
  __ store_local_ptr(R17_tos, Rindex);
}

void TemplateTable::iastore() {
  transition(itos, vtos);

  const Register Rindex      = R3_ARG1,
                 Rstore_addr = R4_ARG2,
                 Rarray      = R5_ARG3,
                 Rtemp       = R6_ARG4;
  __ pop_i(Rindex);
  __ index_check(Rarray, Rindex, LogBytesPerInt, Rtemp, Rstore_addr);
  __ stw(R17_tos, arrayOopDesc::base_offset_in_bytes(T_INT), Rstore_addr);
}

void TemplateTable::lastore() {
  transition(ltos, vtos);

  const Register Rindex      = R3_ARG1,
                 Rstore_addr = R4_ARG2,
                 Rarray      = R5_ARG3,
                 Rtemp       = R6_ARG4;
  __ pop_i(Rindex);
  __ index_check(Rarray, Rindex, LogBytesPerLong, Rtemp, Rstore_addr);
  __ std(R17_tos, arrayOopDesc::base_offset_in_bytes(T_LONG), Rstore_addr);
}

void TemplateTable::fastore() {
  transition(ftos, vtos);

  const Register Rindex      = R3_ARG1,
                 Rstore_addr = R4_ARG2,
                 Rarray      = R5_ARG3,
                 Rtemp       = R6_ARG4;
  __ pop_i(Rindex);
  __ index_check(Rarray, Rindex, LogBytesPerInt, Rtemp, Rstore_addr);
  __ stfs(F15_ftos, arrayOopDesc::base_offset_in_bytes(T_FLOAT), Rstore_addr);
}

void TemplateTable::dastore() {
  transition(dtos, vtos);

  const Register Rindex      = R3_ARG1,
                 Rstore_addr = R4_ARG2,
                 Rarray      = R5_ARG3,
                 Rtemp       = R6_ARG4;
  __ pop_i(Rindex);
  __ index_check(Rarray, Rindex, LogBytesPerLong, Rtemp, Rstore_addr);
  __ stfd(F15_ftos, arrayOopDesc::base_offset_in_bytes(T_DOUBLE), Rstore_addr);
}

// Pop 3 values from the stack and...
void TemplateTable::aastore() {
  transition(vtos, vtos);

  Label Lstore_ok, Lis_null, Ldone;
  const Register Rindex    = R3_ARG1,
                 Rarray    = R4_ARG2,
                 Rscratch  = R11_scratch1,
                 Rscratch2 = R12_scratch2,
                 Rarray_klass = R5_ARG3,
                 Rarray_element_klass = Rarray_klass,
                 Rvalue_klass = R6_ARG4,
                 Rstore_addr = R31; // Use register which survives VM call.

  __ ld(R17_tos, Interpreter::expr_offset_in_bytes(0), R15_esp); // Get value to store.
  __ lwz(Rindex, Interpreter::expr_offset_in_bytes(1), R15_esp); // Get index.
  __ ld(Rarray, Interpreter::expr_offset_in_bytes(2), R15_esp);  // Get array.

  __ verify_oop(R17_tos);
  __ index_check_without_pop(Rarray, Rindex, UseCompressedOops ? 2 : LogBytesPerWord, Rscratch, Rstore_addr);
  // Rindex is dead!
  Register Rscratch3 = Rindex;

  // Do array store check - check for NULL value first.
  __ cmpdi(CCR0, R17_tos, 0);
  __ beq(CCR0, Lis_null);

  __ load_klass(Rarray_klass, Rarray);
  __ load_klass(Rvalue_klass, R17_tos);

  // Do fast instanceof cache test.
  __ ld(Rarray_element_klass, in_bytes(ObjArrayKlass::element_klass_offset()), Rarray_klass);

  // Generate a fast subtype check. Branch to store_ok if no failure. Throw if failure.
  __ gen_subtype_check(Rvalue_klass /*subklass*/, Rarray_element_klass /*superklass*/, Rscratch, Rscratch2, Rscratch3, Lstore_ok);

  // Fell through: subtype check failed => throw an exception.
  __ load_dispatch_table(R11_scratch1, (address*)Interpreter::_throw_ArrayStoreException_entry);
  __ mtctr(R11_scratch1);
  __ bctr();

  __ bind(Lis_null);
  do_oop_store(_masm, Rstore_addr, arrayOopDesc::base_offset_in_bytes(T_OBJECT), noreg /* 0 */,
               Rscratch, Rscratch2, Rscratch3, _bs->kind(), true /* precise */, false /* check_null */);
  __ profile_null_seen(Rscratch, Rscratch2);
  __ b(Ldone);

  // Store is OK.
  __ bind(Lstore_ok);
  do_oop_store(_masm, Rstore_addr, arrayOopDesc::base_offset_in_bytes(T_OBJECT), R17_tos /* value */,
               Rscratch, Rscratch2, Rscratch3, _bs->kind(), true /* precise */, false /* check_null */);

  __ bind(Ldone);
  // Adjust sp (pops array, index and value).
  __ addi(R15_esp, R15_esp, 3 * Interpreter::stackElementSize);
}

void TemplateTable::bastore() {
  transition(itos, vtos);

  const Register Rindex   = R11_scratch1,
                 Rarray   = R12_scratch2,
                 Rscratch = R3_ARG1;
  __ pop_i(Rindex);
  __ pop_ptr(Rarray);
  // tos: val

  // Need to check whether array is boolean or byte
  // since both types share the bastore bytecode.
  __ load_klass(Rscratch, Rarray);
  __ lwz(Rscratch, in_bytes(Klass::layout_helper_offset()), Rscratch);
  int diffbit = exact_log2(Klass::layout_helper_boolean_diffbit());
  __ testbitdi(CCR0, R0, Rscratch, diffbit);
  Label L_skip;
  __ bfalse(CCR0, L_skip);
  __ andi(R17_tos, R17_tos, 1); // if it is a T_BOOLEAN array, mask the stored value to 0/1
  __ bind(L_skip);

  __ index_check_without_pop(Rarray, Rindex, 0, Rscratch, Rarray);
  __ stb(R17_tos, arrayOopDesc::base_offset_in_bytes(T_BYTE), Rarray);
}

void TemplateTable::castore() {
  transition(itos, vtos);

  const Register Rindex   = R11_scratch1,
                 Rarray   = R12_scratch2,
                 Rscratch = R3_ARG1;
  __ pop_i(Rindex);
  // tos: val
  // Rarray: array ptr (popped by index_check)
  __ index_check(Rarray, Rindex, LogBytesPerShort, Rscratch, Rarray);
  __ sth(R17_tos, arrayOopDesc::base_offset_in_bytes(T_CHAR), Rarray);
}

void TemplateTable::sastore() {
  castore();
}

void TemplateTable::istore(int n) {
  transition(itos, vtos);
  __ stw(R17_tos, Interpreter::local_offset_in_bytes(n), R18_locals);
}

void TemplateTable::lstore(int n) {
  transition(ltos, vtos);
  __ std(R17_tos, Interpreter::local_offset_in_bytes(n + 1), R18_locals);
}

void TemplateTable::fstore(int n) {
  transition(ftos, vtos);
  __ stfs(F15_ftos, Interpreter::local_offset_in_bytes(n), R18_locals);
}

void TemplateTable::dstore(int n) {
  transition(dtos, vtos);
  __ stfd(F15_ftos, Interpreter::local_offset_in_bytes(n + 1), R18_locals);
}

void TemplateTable::astore(int n) {
  transition(vtos, vtos);

  __ pop_ptr();
  __ verify_oop_or_return_address(R17_tos, R11_scratch1);
  __ std(R17_tos, Interpreter::local_offset_in_bytes(n), R18_locals);
}

void TemplateTable::pop() {
  transition(vtos, vtos);

  __ addi(R15_esp, R15_esp, Interpreter::stackElementSize);
}

void TemplateTable::pop2() {
  transition(vtos, vtos);

  __ addi(R15_esp, R15_esp, Interpreter::stackElementSize * 2);
}

void TemplateTable::dup() {
  transition(vtos, vtos);

  __ ld(R11_scratch1, Interpreter::stackElementSize, R15_esp);
  __ push_ptr(R11_scratch1);
}

void TemplateTable::dup_x1() {
  transition(vtos, vtos);

  Register Ra = R11_scratch1,
           Rb = R12_scratch2;
  // stack: ..., a, b
  __ ld(Rb, Interpreter::stackElementSize, R15_esp);
  __ ld(Ra, Interpreter::stackElementSize * 2, R15_esp);
  __ std(Rb, Interpreter::stackElementSize * 2, R15_esp);
  __ std(Ra, Interpreter::stackElementSize, R15_esp);
  __ push_ptr(Rb);
  // stack: ..., b, a, b
}

void TemplateTable::dup_x2() {
  transition(vtos, vtos);

  Register Ra = R11_scratch1,
           Rb = R12_scratch2,
           Rc = R3_ARG1;

  // stack: ..., a, b, c
  __ ld(Rc, Interpreter::stackElementSize, R15_esp);      // load c
  __ ld(Ra, Interpreter::stackElementSize * 3, R15_esp);  // load a
  __ std(Rc, Interpreter::stackElementSize * 3, R15_esp); // store c in a
  __ ld(Rb, Interpreter::stackElementSize * 2, R15_esp);  // load b
  // stack: ..., c, b, c
  __ std(Ra, Interpreter::stackElementSize * 2, R15_esp); // store a in b
  // stack: ..., c, a, c
  __ std(Rb, Interpreter::stackElementSize, R15_esp);     // store b in c
  __ push_ptr(Rc);                                        // push c
  // stack: ..., c, a, b, c
}

void TemplateTable::dup2() {
  transition(vtos, vtos);

  Register Ra = R11_scratch1,
           Rb = R12_scratch2;
  // stack: ..., a, b
  __ ld(Rb, Interpreter::stackElementSize, R15_esp);
  __ ld(Ra, Interpreter::stackElementSize * 2, R15_esp);
  __ push_2ptrs(Ra, Rb);
  // stack: ..., a, b, a, b
}

void TemplateTable::dup2_x1() {
  transition(vtos, vtos);

  Register Ra = R11_scratch1,
           Rb = R12_scratch2,
           Rc = R3_ARG1;
  // stack: ..., a, b, c
  __ ld(Rc, Interpreter::stackElementSize, R15_esp);
  __ ld(Rb, Interpreter::stackElementSize * 2, R15_esp);
  __ std(Rc, Interpreter::stackElementSize * 2, R15_esp);
  __ ld(Ra, Interpreter::stackElementSize * 3, R15_esp);
  __ std(Ra, Interpreter::stackElementSize, R15_esp);
  __ std(Rb, Interpreter::stackElementSize * 3, R15_esp);
  // stack: ..., b, c, a
  __ push_2ptrs(Rb, Rc);
  // stack: ..., b, c, a, b, c
}

void TemplateTable::dup2_x2() {
  transition(vtos, vtos);

  Register Ra = R11_scratch1,
           Rb = R12_scratch2,
           Rc = R3_ARG1,
           Rd = R4_ARG2;
  // stack: ..., a, b, c, d
  __ ld(Rb, Interpreter::stackElementSize * 3, R15_esp);
  __ ld(Rd, Interpreter::stackElementSize, R15_esp);
  __ std(Rb, Interpreter::stackElementSize, R15_esp);     // store b in d
  __ std(Rd, Interpreter::stackElementSize * 3, R15_esp); // store d in b
  __ ld(Ra, Interpreter::stackElementSize * 4, R15_esp);
  __ ld(Rc, Interpreter::stackElementSize * 2, R15_esp);
  __ std(Ra, Interpreter::stackElementSize * 2, R15_esp); // store a in c
  __ std(Rc, Interpreter::stackElementSize * 4, R15_esp); // store c in a
  // stack: ..., c, d, a, b
  __ push_2ptrs(Rc, Rd);
  // stack: ..., c, d, a, b, c, d
}

void TemplateTable::swap() {
  transition(vtos, vtos);
  // stack: ..., a, b

  Register Ra = R11_scratch1,
           Rb = R12_scratch2;
  __ ld(Rb, Interpreter::stackElementSize, R15_esp);
  __ ld(Ra, Interpreter::stackElementSize * 2, R15_esp);
  __ std(Rb, Interpreter::stackElementSize * 2, R15_esp);
  __ std(Ra, Interpreter::stackElementSize, R15_esp);
  // stack: ..., b, a
}

void TemplateTable::iop2(Operation op) {
  transition(itos, itos);

  Register Rscratch = R11_scratch1;

  __ pop_i(Rscratch);
  // tos      = number of bits to shift
  // Rscratch = value to shift
  switch (op) {
    case  add:  __ add(R17_tos, Rscratch, R17_tos); break;
    case  sub:  __ sub(R17_tos, Rscratch, R17_tos); break;
    case  mul:  __ mullw(R17_tos, Rscratch, R17_tos); break;
    case  _and: __ andr(R17_tos, Rscratch, R17_tos); break;
    case  _or:  __ orr(R17_tos, Rscratch, R17_tos); break;
    case  _xor: __ xorr(R17_tos, Rscratch, R17_tos); break;
    case  shl:  __ rldicl(R17_tos, R17_tos, 0, 64-5); __ slw(R17_tos, Rscratch, R17_tos); break;
    case  shr:  __ rldicl(R17_tos, R17_tos, 0, 64-5); __ sraw(R17_tos, Rscratch, R17_tos); break;
    case  ushr: __ rldicl(R17_tos, R17_tos, 0, 64-5); __ srw(R17_tos, Rscratch, R17_tos);
      break;
    default: ShouldNotReachHere();
  }
}

void TemplateTable::lop2(Operation op) {
  transition(ltos, ltos);

  Register Rscratch = R11_scratch1;
  __ pop_l(Rscratch);
  switch (op) {
    case  add:  __ add(R17_tos, Rscratch, R17_tos); break;
    case  sub:  __ sub(R17_tos, Rscratch, R17_tos); break;
    case  _and: __ andr(R17_tos, Rscratch, R17_tos); break;
    case  _or:  __ orr(R17_tos, Rscratch, R17_tos); break;
    case  _xor: __ xorr(R17_tos, Rscratch, R17_tos); break;
    default: ShouldNotReachHere();
  }
}

void TemplateTable::idiv() {
  transition(itos, itos);

  Label Lnormal, Lexception, Ldone;
  Register Rdividend = R11_scratch1; // Used by irem.

  __ addi(R0, R17_tos, 1);
  __ cmplwi(CCR0, R0, 2);
  __ bgt(CCR0, Lnormal); // divisor < -1 or > 1

  __ cmpwi(CCR1, R17_tos, 0);
  __ beq(CCR1, Lexception); // divisor == 0

  __ pop_i(Rdividend);
  __ mullw(R17_tos, Rdividend, R17_tos); // div by +/-1
  __ b(Ldone);

  __ bind(Lexception);
  __ load_dispatch_table(R11_scratch1, (address*)Interpreter::_throw_ArithmeticException_entry);
  __ mtctr(R11_scratch1);
  __ bctr();

  __ align(32, 12);
  __ bind(Lnormal);
  __ pop_i(Rdividend);
  __ divw(R17_tos, Rdividend, R17_tos); // Can't divide minint/-1.
  __ bind(Ldone);
}

void TemplateTable::irem() {
  transition(itos, itos);

  __ mr(R12_scratch2, R17_tos);
  idiv();
  __ mullw(R17_tos, R17_tos, R12_scratch2);
  __ subf(R17_tos, R17_tos, R11_scratch1); // Dividend set by idiv.
}

void TemplateTable::lmul() {
  transition(ltos, ltos);

  __ pop_l(R11_scratch1);
  __ mulld(R17_tos, R11_scratch1, R17_tos);
}

void TemplateTable::ldiv() {
  transition(ltos, ltos);

  Label Lnormal, Lexception, Ldone;
  Register Rdividend = R11_scratch1; // Used by lrem.

  __ addi(R0, R17_tos, 1);
  __ cmpldi(CCR0, R0, 2);
  __ bgt(CCR0, Lnormal); // divisor < -1 or > 1

  __ cmpdi(CCR1, R17_tos, 0);
  __ beq(CCR1, Lexception); // divisor == 0

  __ pop_l(Rdividend);
  __ mulld(R17_tos, Rdividend, R17_tos); // div by +/-1
  __ b(Ldone);

  __ bind(Lexception);
  __ load_dispatch_table(R11_scratch1, (address*)Interpreter::_throw_ArithmeticException_entry);
  __ mtctr(R11_scratch1);
  __ bctr();

  __ align(32, 12);
  __ bind(Lnormal);
  __ pop_l(Rdividend);
  __ divd(R17_tos, Rdividend, R17_tos); // Can't divide minint/-1.
  __ bind(Ldone);
}

void TemplateTable::lrem() {
  transition(ltos, ltos);

  __ mr(R12_scratch2, R17_tos);
  ldiv();
  __ mulld(R17_tos, R17_tos, R12_scratch2);
  __ subf(R17_tos, R17_tos, R11_scratch1); // Dividend set by ldiv.
}

void TemplateTable::lshl() {
  transition(itos, ltos);

  __ rldicl(R17_tos, R17_tos, 0, 64-6); // Extract least significant bits.
  __ pop_l(R11_scratch1);
  __ sld(R17_tos, R11_scratch1, R17_tos);
}

void TemplateTable::lshr() {
  transition(itos, ltos);

  __ rldicl(R17_tos, R17_tos, 0, 64-6); // Extract least significant bits.
  __ pop_l(R11_scratch1);
  __ srad(R17_tos, R11_scratch1, R17_tos);
}

void TemplateTable::lushr() {
  transition(itos, ltos);

  __ rldicl(R17_tos, R17_tos, 0, 64-6); // Extract least significant bits.
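  // (Long shifts use only the low 6 bits of the count, int shifts above use
  // only the low 5, as required by the JVM specification.)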
  __ pop_l(R11_scratch1);
  __ srd(R17_tos, R11_scratch1, R17_tos);
}

void TemplateTable::fop2(Operation op) {
  transition(ftos, ftos);

  switch (op) {
    case add: __ pop_f(F0_SCRATCH); __ fadds(F15_ftos, F0_SCRATCH, F15_ftos); break;
    case sub: __ pop_f(F0_SCRATCH); __ fsubs(F15_ftos, F0_SCRATCH, F15_ftos); break;
    case mul: __ pop_f(F0_SCRATCH); __ fmuls(F15_ftos, F0_SCRATCH, F15_ftos); break;
    case div: __ pop_f(F0_SCRATCH); __ fdivs(F15_ftos, F0_SCRATCH, F15_ftos); break;
    case rem:
      __ pop_f(F1_ARG1);
      __ fmr(F2_ARG2, F15_ftos);
      __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::frem));
      __ fmr(F15_ftos, F1_RET);
      break;

    default: ShouldNotReachHere();
  }
}

void TemplateTable::dop2(Operation op) {
  transition(dtos, dtos);

  switch (op) {
    case add: __ pop_d(F0_SCRATCH); __ fadd(F15_ftos, F0_SCRATCH, F15_ftos); break;
    case sub: __ pop_d(F0_SCRATCH); __ fsub(F15_ftos, F0_SCRATCH, F15_ftos); break;
    case mul: __ pop_d(F0_SCRATCH); __ fmul(F15_ftos, F0_SCRATCH, F15_ftos); break;
    case div: __ pop_d(F0_SCRATCH); __ fdiv(F15_ftos, F0_SCRATCH, F15_ftos); break;
    case rem:
      __ pop_d(F1_ARG1);
      __ fmr(F2_ARG2, F15_ftos);
      __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::drem));
      __ fmr(F15_ftos, F1_RET);
      break;

    default: ShouldNotReachHere();
  }
}

// Negate the value in the TOS cache.
void TemplateTable::ineg() {
  transition(itos, itos);

  __ neg(R17_tos, R17_tos);
}

// Negate the value in the TOS cache.
void TemplateTable::lneg() {
  transition(ltos, ltos);

  __ neg(R17_tos, R17_tos);
}

void TemplateTable::fneg() {
  transition(ftos, ftos);

  __ fneg(F15_ftos, F15_ftos);
}

void TemplateTable::dneg() {
  transition(dtos, dtos);

  __ fneg(F15_ftos, F15_ftos);
}

// Increments a local variable in place.
void TemplateTable::iinc() {
  transition(vtos, vtos);

  const Register Rindex     = R11_scratch1,
                 Rincrement = R0,
                 Rvalue     = R12_scratch2;

  locals_index(Rindex); // Load locals index from bytecode stream.
  __ lbz(Rincrement, 2, R14_bcp); // Load increment from the bytecode stream.
  __ extsb(Rincrement, Rincrement);

  __ load_local_int(Rvalue, Rindex, Rindex); // Puts address of local into Rindex.
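  // Rindex now holds the local's address, so the sum can be stored back through it.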

  __ add(Rvalue, Rincrement, Rvalue);
  __ stw(Rvalue, 0, Rindex);
}

void TemplateTable::wide_iinc() {
  transition(vtos, vtos);

  Register Rindex       = R11_scratch1,
           Rlocals_addr = Rindex,
           Rincr        = R12_scratch2;
  locals_index_wide(Rindex);
  __ get_2_byte_integer_at_bcp(4, Rincr, InterpreterMacroAssembler::Signed);
  __ load_local_int(R17_tos, Rlocals_addr, Rindex);
  __ add(R17_tos, Rincr, R17_tos);
  __ stw(R17_tos, 0, Rlocals_addr);
}

void TemplateTable::convert() {
  // %%%%% Factor this first part across platforms
#ifdef ASSERT
  TosState tos_in  = ilgl;
  TosState tos_out = ilgl;
  switch (bytecode()) {
    case Bytecodes::_i2l: // fall through
    case Bytecodes::_i2f: // fall through
    case Bytecodes::_i2d: // fall through
    case Bytecodes::_i2b: // fall through
    case Bytecodes::_i2c: // fall through
    case Bytecodes::_i2s: tos_in = itos; break;
    case Bytecodes::_l2i: // fall through
    case Bytecodes::_l2f: // fall through
    case Bytecodes::_l2d: tos_in = ltos; break;
    case Bytecodes::_f2i: // fall through
    case Bytecodes::_f2l: // fall through
    case Bytecodes::_f2d: tos_in = ftos; break;
    case Bytecodes::_d2i: // fall through
    case Bytecodes::_d2l: // fall through
    case Bytecodes::_d2f: tos_in = dtos; break;
    default : ShouldNotReachHere();
  }
  switch (bytecode()) {
    case Bytecodes::_l2i: // fall through
    case Bytecodes::_f2i: // fall through
    case Bytecodes::_d2i: // fall through
    case Bytecodes::_i2b: // fall through
    case Bytecodes::_i2c: // fall through
    case Bytecodes::_i2s: tos_out = itos; break;
    case Bytecodes::_i2l: // fall through
    case Bytecodes::_f2l: // fall through
    case Bytecodes::_d2l: tos_out = ltos; break;
    case Bytecodes::_i2f: // fall through
    case Bytecodes::_l2f: // fall through
    case Bytecodes::_d2f: tos_out = ftos; break;
    case Bytecodes::_i2d: // fall through
    case Bytecodes::_l2d: // fall through
    case Bytecodes::_f2d: tos_out = dtos; break;
    default : ShouldNotReachHere();
  }
  transition(tos_in, tos_out);
#endif

  // Conversion
  Label done;
  switch (bytecode()) {
    case Bytecodes::_i2l:
      __ extsw(R17_tos, R17_tos);
      break;

    case Bytecodes::_l2i:
      // Nothing to do, we'll continue to work with the lower bits.
      break;

    case Bytecodes::_i2b:
      __ extsb(R17_tos, R17_tos);
      break;

    case Bytecodes::_i2c:
      __ rldicl(R17_tos, R17_tos, 0, 64-2*8);
      break;

    case Bytecodes::_i2s:
      __ extsh(R17_tos, R17_tos);
      break;

    case Bytecodes::_i2d:
      __ extsw(R17_tos, R17_tos);
      // fall through to _l2d
    case Bytecodes::_l2d:
      __ push_l_pop_d();
      __ fcfid(F15_ftos, F15_ftos);
      break;

    case Bytecodes::_i2f:
      __ extsw(R17_tos, R17_tos);
      __ push_l_pop_d();
      if (VM_Version::has_fcfids()) { // fcfids is >= Power7 only
        // Comment: alternatively, load with sign extend could be done by lfiwax.
        __ fcfids(F15_ftos, F15_ftos);
      } else {
        __ fcfid(F15_ftos, F15_ftos);
        __ frsp(F15_ftos, F15_ftos);
      }
      break;

    case Bytecodes::_l2f:
      if (VM_Version::has_fcfids()) { // fcfids is >= Power7 only
        __ push_l_pop_d();
        __ fcfids(F15_ftos, F15_ftos);
      } else {
        // Avoid rounding problem when result should be 0x3f800001: need fixup code before fcfid+frsp.
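        // (fcfid rounds the long to double and frsp rounds again to float;
        // this double rounding can differ from a single correctly rounded
        // conversion in the last bit, so call the runtime conversion instead.)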
        __ mr(R3_ARG1, R17_tos);
        __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::l2f));
        __ fmr(F15_ftos, F1_RET);
      }
      break;

    case Bytecodes::_f2d:
      // empty
      break;

    case Bytecodes::_d2f:
      __ frsp(F15_ftos, F15_ftos);
      break;

    case Bytecodes::_d2i:
    case Bytecodes::_f2i:
      __ fcmpu(CCR0, F15_ftos, F15_ftos);
      __ li(R17_tos, 0); // 0 in case of NAN
      __ bso(CCR0, done);
      __ fctiwz(F15_ftos, F15_ftos);
      __ push_d_pop_l();
      break;

    case Bytecodes::_d2l:
    case Bytecodes::_f2l:
      __ fcmpu(CCR0, F15_ftos, F15_ftos);
      __ li(R17_tos, 0); // 0 in case of NAN
      __ bso(CCR0, done);
      __ fctidz(F15_ftos, F15_ftos);
      __ push_d_pop_l();
      break;

    default: ShouldNotReachHere();
  }
  __ bind(done);
}

// Long compare
void TemplateTable::lcmp() {
  transition(ltos, itos);

  const Register Rscratch = R11_scratch1;
  __ pop_l(Rscratch); // first operand, deeper in stack

  __ cmpd(CCR0, Rscratch, R17_tos); // compare
  __ mfcr(R17_tos); // set bit 32..33 as follows: <: 0b10, =: 0b00, >: 0b01
  __ srwi(Rscratch, R17_tos, 30);
  __ srawi(R17_tos, R17_tos, 31);
  __ orr(R17_tos, Rscratch, R17_tos); // set result as follows: <: -1, =: 0, >: 1
}

// fcmpl/fcmpg and dcmpl/dcmpg bytecodes
// unordered_result == -1 => fcmpl or dcmpl
// unordered_result ==  1 => fcmpg or dcmpg
void TemplateTable::float_cmp(bool is_float, int unordered_result) {
  const FloatRegister Rfirst  = F0_SCRATCH,
                      Rsecond = F15_ftos;
  const Register Rscratch = R11_scratch1;

  if (is_float) {
    __ pop_f(Rfirst);
  } else {
    __ pop_d(Rfirst);
  }

  Label Lunordered, Ldone;
  __ fcmpu(CCR0, Rfirst, Rsecond); // compare
  if (unordered_result) {
    __ bso(CCR0, Lunordered);
  }
  __ mfcr(R17_tos); // set bit 32..33 as follows: <: 0b10, =: 0b00, >: 0b01
  __ srwi(Rscratch, R17_tos, 30);
  __ srawi(R17_tos, R17_tos, 31);
  __ orr(R17_tos, Rscratch, R17_tos); // set result as follows: <: -1, =: 0, >: 1
  if (unordered_result) {
    __ b(Ldone);
    __ bind(Lunordered);
    __ load_const_optimized(R17_tos, unordered_result);
  }
  __ bind(Ldone);
}

// Branch_conditional which takes TemplateTable::Condition.
void TemplateTable::branch_conditional(ConditionRegister crx, TemplateTable::Condition cc, Label& L, bool invert) {
  bool positive = false;
  Assembler::Condition cond = Assembler::equal;
  switch (cc) {
    case TemplateTable::equal:         positive = true ; cond = Assembler::equal  ; break;
    case TemplateTable::not_equal:     positive = false; cond = Assembler::equal  ; break;
    case TemplateTable::less:          positive = true ; cond = Assembler::less   ; break;
    case TemplateTable::less_equal:    positive = false; cond = Assembler::greater; break;
    case TemplateTable::greater:       positive = true ; cond = Assembler::greater; break;
    case TemplateTable::greater_equal: positive = false; cond = Assembler::less   ; break;
    default: ShouldNotReachHere();
  }
  int bo = (positive != invert) ? Assembler::bcondCRbiIs1 : Assembler::bcondCRbiIs0;
  int bi = Assembler::bi0(crx, cond);
  __ bc(bo, bi, L);
}

void TemplateTable::branch(bool is_jsr, bool is_wide) {

  // Note: on SPARC, we use InterpreterMacroAssembler::if_cmp also.
  __ verify_thread();

  const Register Rscratch1    = R11_scratch1,
                 Rscratch2    = R12_scratch2,
                 Rscratch3    = R3_ARG1,
                 R4_counters  = R4_ARG2,
                 bumped_count = R31,
                 Rdisp        = R22_tmp2;

  __ profile_taken_branch(Rscratch1, bumped_count);

  // Get (wide) offset.
  if (is_wide) {
    __ get_4_byte_integer_at_bcp(1, Rdisp, InterpreterMacroAssembler::Signed);
  } else {
    __ get_2_byte_integer_at_bcp(1, Rdisp, InterpreterMacroAssembler::Signed);
  }

  // --------------------------------------------------------------------------
  // Handle all the JSR stuff here, then exit.
  // It's much shorter and cleaner than intermingling with the
  // non-JSR normal-branch stuff occurring below.
  if (is_jsr) {
    // Compute return address as bci in Otos_i.
    __ ld(Rscratch1, in_bytes(Method::const_offset()), R19_method);
    __ addi(Rscratch2, R14_bcp, -in_bytes(ConstMethod::codes_offset()) + (is_wide ? 5 : 3));
    __ subf(R17_tos, Rscratch1, Rscratch2);

    // Bump bcp to target of JSR.
    __ add(R14_bcp, Rdisp, R14_bcp);
    // Push returnAddress for "ret" on stack.
    __ push_ptr(R17_tos);
    // And away we go!
    __ dispatch_next(vtos);
    return;
  }

  // --------------------------------------------------------------------------
  // Normal (non-jsr) branch handling

  const bool increment_invocation_counter_for_backward_branches = UseCompiler && UseLoopCounter;
  if (increment_invocation_counter_for_backward_branches) {
    //__ unimplemented("branch invocation counter");

    Label Lforward;
    __ add(R14_bcp, Rdisp, R14_bcp); // Add to bc addr.

    // Check branch direction.
    __ cmpdi(CCR0, Rdisp, 0);
    __ bgt(CCR0, Lforward);

    __ get_method_counters(R19_method, R4_counters, Lforward);

    if (TieredCompilation) {
      Label Lno_mdo, Loverflow;
      const int increment = InvocationCounter::count_increment;
      const int mask = ((1 << Tier0BackedgeNotifyFreqLog) - 1) << InvocationCounter::count_shift;
      if (ProfileInterpreter) {
        Register Rmdo = Rscratch1;

        // If no method data exists, go to profile_continue.
        __ ld(Rmdo, in_bytes(Method::method_data_offset()), R19_method);
        __ cmpdi(CCR0, Rmdo, 0);
        __ beq(CCR0, Lno_mdo);

        // Increment backedge counter in the MDO.
        const int mdo_bc_offs = in_bytes(MethodData::backedge_counter_offset()) + in_bytes(InvocationCounter::counter_offset());
        __ lwz(Rscratch2, mdo_bc_offs, Rmdo);
        __ load_const_optimized(Rscratch3, mask, R0);
        __ addi(Rscratch2, Rscratch2, increment);
        __ stw(Rscratch2, mdo_bc_offs, Rmdo);
        __ and_(Rscratch3, Rscratch2, Rscratch3);
        __ bne(CCR0, Lforward);
        __ b(Loverflow);
      }

      // If there's no MDO, increment counter in method.
      const int mo_bc_offs = in_bytes(MethodCounters::backedge_counter_offset()) + in_bytes(InvocationCounter::counter_offset());
      __ bind(Lno_mdo);
      __ lwz(Rscratch2, mo_bc_offs, R4_counters);
      __ load_const_optimized(Rscratch3, mask, R0);
      __ addi(Rscratch2, Rscratch2, increment);
      __ stw(Rscratch2, mo_bc_offs, R4_counters);
      __ and_(Rscratch3, Rscratch2, Rscratch3);
      __ bne(CCR0, Lforward);

      __ bind(Loverflow);

      // Notify point for loop, pass branch bytecode.
      __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::frequency_counter_overflow), R14_bcp, true);

      // Was an OSR adapter generated?
      // R3_RET = osr nmethod
      __ cmpdi(CCR0, R3_RET, 0);
      __ beq(CCR0, Lforward);

      // Has the nmethod been invalidated already?
      __ lwz(R0, nmethod::entry_bci_offset(), R3_RET);
      __ cmpwi(CCR0, R0, InvalidOSREntryBci);
      __ beq(CCR0, Lforward);

      // Migrate the interpreter frame off of the stack.
      // We can use all registers because we will not return to interpreter from this point.

      // Save nmethod.
      const Register osr_nmethod = R31;
      __ mr(osr_nmethod, R3_RET);
      __ set_top_ijava_frame_at_SP_as_last_Java_frame(R1_SP, R11_scratch1);
      __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::OSR_migration_begin), R16_thread);
      __ reset_last_Java_frame();
      // OSR buffer is in ARG1.

      // Remove the interpreter frame.
      __ merge_frames(/*top_frame_sp*/ R21_sender_SP, /*return_pc*/ R0, R11_scratch1, R12_scratch2);

      // Jump to the osr code.
      __ ld(R11_scratch1, nmethod::osr_entry_point_offset(), osr_nmethod);
      __ mtlr(R0);
      __ mtctr(R11_scratch1);
      __ bctr();

    } else {

      const Register invoke_ctr = Rscratch1;
      // Update Backedge branch separately from invocations.
      __ increment_backedge_counter(R4_counters, invoke_ctr, Rscratch2, Rscratch3);

      if (ProfileInterpreter) {
        __ test_invocation_counter_for_mdp(invoke_ctr, Rscratch2, Lforward);
        if (UseOnStackReplacement) {
          __ test_backedge_count_for_osr(bumped_count, R14_bcp, Rscratch2);
        }
      } else {
        if (UseOnStackReplacement) {
          __ test_backedge_count_for_osr(invoke_ctr, R14_bcp, Rscratch2);
        }
      }
    }

    __ bind(Lforward);

  } else {
    // Bump bytecode pointer by displacement (take the branch).
    __ add(R14_bcp, Rdisp, R14_bcp); // Add to bc addr.
  }
  // Continue with bytecode @ target.
  // %%%%% Like Intel, could speed things up by moving bytecode fetch to code above,
  // %%%%% and changing dispatch_next to dispatch_only.
  __ dispatch_next(vtos);
}

// Helper function for if_cmp* methods below.
// Factored out common compare and branch code.
void TemplateTable::if_cmp_common(Register Rfirst, Register Rsecond, Register Rscratch1, Register Rscratch2, Condition cc, bool is_jint, bool cmp0) {
  Label Lnot_taken;
  // Note: The condition code we get is the condition under which we
  // *fall through*! So we have to invert the CC here.

  if (is_jint) {
    if (cmp0) {
      __ cmpwi(CCR0, Rfirst, 0);
    } else {
      __ cmpw(CCR0, Rfirst, Rsecond);
    }
  } else {
    if (cmp0) {
      __ cmpdi(CCR0, Rfirst, 0);
    } else {
      __ cmpd(CCR0, Rfirst, Rsecond);
    }
  }
  branch_conditional(CCR0, cc, Lnot_taken, /*invert*/ true);

  // Condition is false => Jump!
  branch(false, false);

  // Condition is not true => Continue.
  __ align(32, 12);
  __ bind(Lnot_taken);
  __ profile_not_taken_branch(Rscratch1, Rscratch2);
}

// Compare integer values with zero and fall through if CC holds, branch away otherwise.
void TemplateTable::if_0cmp(Condition cc) {
  transition(itos, vtos);

  if_cmp_common(R17_tos, noreg, R11_scratch1, R12_scratch2, cc, true, true);
}

// Compare integer values and fall through if CC holds, branch away otherwise.
1778 // 1779 // Interface: 1780 // - Rfirst: First operand (older stack value) 1781 // - tos: Second operand (younger stack value) 1782 void TemplateTable::if_icmp(Condition cc) { 1783 transition(itos, vtos); 1784 1785 const Register Rfirst = R0, 1786 Rsecond = R17_tos; 1787 1788 __ pop_i(Rfirst); 1789 if_cmp_common(Rfirst, Rsecond, R11_scratch1, R12_scratch2, cc, true, false); 1790 } 1791 1792 void TemplateTable::if_nullcmp(Condition cc) { 1793 transition(atos, vtos); 1794 1795 if_cmp_common(R17_tos, noreg, R11_scratch1, R12_scratch2, cc, false, true); 1796 } 1797 1798 void TemplateTable::if_acmp(Condition cc) { 1799 transition(atos, vtos); 1800 1801 const Register Rfirst = R0, 1802 Rsecond = R17_tos; 1803 1804 __ pop_ptr(Rfirst); 1805 if_cmp_common(Rfirst, Rsecond, R11_scratch1, R12_scratch2, cc, false, false); 1806 } 1807 1808 void TemplateTable::ret() { 1809 locals_index(R11_scratch1); 1810 __ load_local_ptr(R17_tos, R11_scratch1, R11_scratch1); 1811 1812 __ profile_ret(vtos, R17_tos, R11_scratch1, R12_scratch2); 1813 1814 __ ld(R11_scratch1, in_bytes(Method::const_offset()), R19_method); 1815 __ add(R11_scratch1, R17_tos, R11_scratch1); 1816 __ addi(R14_bcp, R11_scratch1, in_bytes(ConstMethod::codes_offset())); 1817 __ dispatch_next(vtos); 1818 } 1819 1820 void TemplateTable::wide_ret() { 1821 transition(vtos, vtos); 1822 1823 const Register Rindex = R3_ARG1, 1824 Rscratch1 = R11_scratch1, 1825 Rscratch2 = R12_scratch2; 1826 1827 locals_index_wide(Rindex); 1828 __ load_local_ptr(R17_tos, R17_tos, Rindex); 1829 __ profile_ret(vtos, R17_tos, Rscratch1, R12_scratch2); 1830 // Tos now contains the bci, compute the bcp from that. 1831 __ ld(Rscratch1, in_bytes(Method::const_offset()), R19_method); 1832 __ addi(Rscratch2, R17_tos, in_bytes(ConstMethod::codes_offset())); 1833 __ add(R14_bcp, Rscratch1, Rscratch2); 1834 __ dispatch_next(vtos); 1835 } 1836 1837 void TemplateTable::tableswitch() { 1838 transition(itos, vtos); 1839 1840 Label Ldispatch, Ldefault_case; 1841 Register Rlow_byte = R3_ARG1, 1842 Rindex = Rlow_byte, 1843 Rhigh_byte = R4_ARG2, 1844 Rdef_offset_addr = R5_ARG3, // is going to contain address of default offset 1845 Rscratch1 = R11_scratch1, 1846 Rscratch2 = R12_scratch2, 1847 Roffset = R6_ARG4; 1848 1849 // Align bcp. 1850 __ addi(Rdef_offset_addr, R14_bcp, BytesPerInt); 1851 __ clrrdi(Rdef_offset_addr, Rdef_offset_addr, log2_long((jlong)BytesPerInt)); 1852 1853 // Load lo & hi. 1854 __ get_u4(Rlow_byte, Rdef_offset_addr, BytesPerInt, InterpreterMacroAssembler::Unsigned); 1855 __ get_u4(Rhigh_byte, Rdef_offset_addr, 2 *BytesPerInt, InterpreterMacroAssembler::Unsigned); 1856 1857 // Check for default case (=index outside [low,high]). 1858 __ cmpw(CCR0, R17_tos, Rlow_byte); 1859 __ cmpw(CCR1, R17_tos, Rhigh_byte); 1860 __ blt(CCR0, Ldefault_case); 1861 __ bgt(CCR1, Ldefault_case); 1862 1863 // Lookup dispatch offset. 
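  // The jump table begins 3 * BytesPerInt past the aligned address in
  // Rdef_offset_addr (past the default-offset, low and high words); each
  // entry is one 4-byte offset, so below we scale (tos - low) by BytesPerInt
  // and add 3 * BytesPerInt before the indexed load.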
1864 __ sub(Rindex, R17_tos, Rlow_byte); 1865 __ extsw(Rindex, Rindex); 1866 __ profile_switch_case(Rindex, Rhigh_byte /* scratch */, Rscratch1, Rscratch2); 1867 __ sldi(Rindex, Rindex, LogBytesPerInt); 1868 __ addi(Rindex, Rindex, 3 * BytesPerInt); 1869 #if defined(VM_LITTLE_ENDIAN) 1870 __ lwbrx(Roffset, Rdef_offset_addr, Rindex); 1871 __ extsw(Roffset, Roffset); 1872 #else 1873 __ lwax(Roffset, Rdef_offset_addr, Rindex); 1874 #endif 1875 __ b(Ldispatch); 1876 1877 __ bind(Ldefault_case); 1878 __ profile_switch_default(Rhigh_byte, Rscratch1); 1879 __ get_u4(Roffset, Rdef_offset_addr, 0, InterpreterMacroAssembler::Signed); 1880 1881 __ bind(Ldispatch); 1882 1883 __ add(R14_bcp, Roffset, R14_bcp); 1884 __ dispatch_next(vtos); 1885 } 1886 1887 void TemplateTable::lookupswitch() { 1888 transition(itos, itos); 1889 __ stop("lookupswitch bytecode should have been rewritten"); 1890 } 1891 1892 // Table switch using linear search through cases. 1893 // Bytecode stream format: 1894 // Bytecode (1) | 4-byte padding | default offset (4) | count (4) | value/offset pair1 (8) | value/offset pair2 (8) | ... 1895 // Note: Everything is big-endian format here. 1896 void TemplateTable::fast_linearswitch() { 1897 transition(itos, vtos); 1898 1899 Label Lloop_entry, Lsearch_loop, Lcontinue_execution, Ldefault_case; 1900 Register Rcount = R3_ARG1, 1901 Rcurrent_pair = R4_ARG2, 1902 Rdef_offset_addr = R5_ARG3, // Is going to contain address of default offset. 1903 Roffset = R31, // Might need to survive C call. 1904 Rvalue = R12_scratch2, 1905 Rscratch = R11_scratch1, 1906 Rcmp_value = R17_tos; 1907 1908 // Align bcp. 1909 __ addi(Rdef_offset_addr, R14_bcp, BytesPerInt); 1910 __ clrrdi(Rdef_offset_addr, Rdef_offset_addr, log2_long((jlong)BytesPerInt)); 1911 1912 // Setup loop counter and limit. 1913 __ get_u4(Rcount, Rdef_offset_addr, BytesPerInt, InterpreterMacroAssembler::Unsigned); 1914 __ addi(Rcurrent_pair, Rdef_offset_addr, 2 * BytesPerInt); // Rcurrent_pair now points to first pair. 1915 1916 __ mtctr(Rcount); 1917 __ cmpwi(CCR0, Rcount, 0); 1918 __ bne(CCR0, Lloop_entry); 1919 1920 // Default case 1921 __ bind(Ldefault_case); 1922 __ get_u4(Roffset, Rdef_offset_addr, 0, InterpreterMacroAssembler::Signed); 1923 if (ProfileInterpreter) { 1924 __ profile_switch_default(Rdef_offset_addr, Rcount/* scratch */); 1925 } 1926 __ b(Lcontinue_execution); 1927 1928 // Next iteration 1929 __ bind(Lsearch_loop); 1930 __ bdz(Ldefault_case); 1931 __ addi(Rcurrent_pair, Rcurrent_pair, 2 * BytesPerInt); 1932 __ bind(Lloop_entry); 1933 __ get_u4(Rvalue, Rcurrent_pair, 0, InterpreterMacroAssembler::Unsigned); 1934 __ cmpw(CCR0, Rvalue, Rcmp_value); 1935 __ bne(CCR0, Lsearch_loop); 1936 1937 // Found, load offset. 1938 __ get_u4(Roffset, Rcurrent_pair, BytesPerInt, InterpreterMacroAssembler::Signed); 1939 // Calculate case index and profile 1940 __ mfctr(Rcurrent_pair); 1941 if (ProfileInterpreter) { 1942 __ sub(Rcurrent_pair, Rcount, Rcurrent_pair); 1943 __ profile_switch_case(Rcurrent_pair, Rcount /*scratch*/, Rdef_offset_addr/*scratch*/, Rscratch); 1944 } 1945 1946 __ bind(Lcontinue_execution); 1947 __ add(R14_bcp, Roffset, R14_bcp); 1948 __ dispatch_next(vtos); 1949 } 1950 1951 // Table switch using binary search (value/offset pairs are ordered). 1952 // Bytecode stream format: 1953 // Bytecode (1) | 4-byte padding | default offset (4) | count (4) | value/offset pair1 (8) | value/offset pair2 (8) | ... 1954 // Note: Everything is big-endian format here. 
So on little-endian machines we have to byte-reverse the offset, the count, and the compare value.
void TemplateTable::fast_binaryswitch() {

  transition(itos, vtos);
  // Implementation using the following core algorithm: (copied from Intel)
  //
  // int binary_search(int key, LookupswitchPair* array, int n) {
  //   // Binary search according to "Methodik des Programmierens" by
  //   // Edsger W. Dijkstra and W.H.J. Feijen, Addison Wesley Germany 1985.
  //   int i = 0;
  //   int j = n;
  //   while (i+1 < j) {
  //     // invariant P: 0 <= i < j <= n and (a[i] <= key < a[j] or Q)
  //     // with      Q: for all i: 0 <= i < n: key < a[i]
  //     // where a stands for the array and assuming that the (nonexistent)
  //     // element a[n] is infinitely big.
  //     int h = (i + j) >> 1;
  //     // i < h < j
  //     if (key < array[h].fast_match()) {
  //       j = h;
  //     } else {
  //       i = h;
  //     }
  //   }
  //   // R: a[i] <= key < a[i+1] or Q
  //   // (i.e., if key is within array, i is the correct index)
  //   return i;
  // }

  // register allocation
  const Register Rkey     = R17_tos; // already set (tosca)
  const Register Rarray   = R3_ARG1;
  const Register Ri       = R4_ARG2;
  const Register Rj       = R5_ARG3;
  const Register Rh       = R6_ARG4;
  const Register Rscratch = R11_scratch1;

  const int log_entry_size = 3;
  const int entry_size = 1 << log_entry_size;

  Label found;

  // Find array start.
  __ addi(Rarray, R14_bcp, 3 * BytesPerInt);
  __ clrrdi(Rarray, Rarray, log2_long((jlong)BytesPerInt));

  // initialize i & j
  __ li(Ri, 0);
  __ get_u4(Rj, Rarray, -BytesPerInt, InterpreterMacroAssembler::Unsigned);

  // and start.
  Label entry;
  __ b(entry);

  // binary search loop
  { Label loop;
    __ bind(loop);
    // int h = (i + j) >> 1;
    __ srdi(Rh, Rh, 1);
    // if (key < array[h].fast_match()) {
    //   j = h;
    // } else {
    //   i = h;
    // }
    __ sldi(Rscratch, Rh, log_entry_size);
#if defined(VM_LITTLE_ENDIAN)
    __ lwbrx(Rscratch, Rscratch, Rarray);
#else
    __ lwzx(Rscratch, Rscratch, Rarray);
#endif

    // if (key < current value)
    //   Rh = Rj
    // else
    //   Rh = Ri
    Label Lgreater;
    __ cmpw(CCR0, Rkey, Rscratch);
    __ bge(CCR0, Lgreater);
    __ mr(Rj, Rh);
    __ b(entry);
    __ bind(Lgreater);
    __ mr(Ri, Rh);

    // while (i+1 < j)
    __ bind(entry);
    __ addi(Rscratch, Ri, 1);
    __ cmpw(CCR0, Rscratch, Rj);
    __ add(Rh, Ri, Rj); // Precompute Rh = i + j; the srdi at the loop head completes h = (i + j) >> 1.

    __ blt(CCR0, loop);
  }

  // End of binary search, result index is i (must check again!).
  Label default_case;
  Label continue_execution;
  if (ProfileInterpreter) {
    __ mr(Rh, Ri); // Save index in i for profiling.
2051 } 2052 // Ri = value offset 2053 __ sldi(Ri, Ri, log_entry_size); 2054 __ add(Ri, Ri, Rarray); 2055 __ get_u4(Rscratch, Ri, 0, InterpreterMacroAssembler::Unsigned); 2056 2057 Label not_found; 2058 // Ri = offset offset 2059 __ cmpw(CCR0, Rkey, Rscratch); 2060 __ beq(CCR0, not_found); 2061 // entry not found -> j = default offset 2062 __ get_u4(Rj, Rarray, -2 * BytesPerInt, InterpreterMacroAssembler::Unsigned); 2063 __ b(default_case); 2064 2065 __ bind(not_found); 2066 // entry found -> j = offset 2067 __ profile_switch_case(Rh, Rj, Rscratch, Rkey); 2068 __ get_u4(Rj, Ri, BytesPerInt, InterpreterMacroAssembler::Unsigned); 2069 2070 if (ProfileInterpreter) { 2071 __ b(continue_execution); 2072 } 2073 2074 __ bind(default_case); // fall through (if not profiling) 2075 __ profile_switch_default(Ri, Rscratch); 2076 2077 __ bind(continue_execution); 2078 2079 __ extsw(Rj, Rj); 2080 __ add(R14_bcp, Rj, R14_bcp); 2081 __ dispatch_next(vtos); 2082 } 2083 2084 void TemplateTable::_return(TosState state) { 2085 transition(state, state); 2086 assert(_desc->calls_vm(), 2087 "inconsistent calls_vm information"); // call in remove_activation 2088 2089 if (_desc->bytecode() == Bytecodes::_return_register_finalizer) { 2090 2091 Register Rscratch = R11_scratch1, 2092 Rklass = R12_scratch2, 2093 Rklass_flags = Rklass; 2094 Label Lskip_register_finalizer; 2095 2096 // Check if the method has the FINALIZER flag set and call into the VM to finalize in this case. 2097 assert(state == vtos, "only valid state"); 2098 __ ld(R17_tos, 0, R18_locals); 2099 2100 // Load klass of this obj. 2101 __ load_klass(Rklass, R17_tos); 2102 __ lwz(Rklass_flags, in_bytes(Klass::access_flags_offset()), Rklass); 2103 __ testbitdi(CCR0, R0, Rklass_flags, exact_log2(JVM_ACC_HAS_FINALIZER)); 2104 __ bfalse(CCR0, Lskip_register_finalizer); 2105 2106 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::register_finalizer), R17_tos /* obj */); 2107 2108 __ align(32, 12); 2109 __ bind(Lskip_register_finalizer); 2110 } 2111 2112 // Move the result value into the correct register and remove memory stack frame. 2113 __ remove_activation(state, /* throw_monitor_exception */ true); 2114 // Restoration of lr done by remove_activation. 2115 switch (state) { 2116 // Narrow result if state is itos but result type is smaller. 2117 // Need to narrow in the return bytecode rather than in generate_return_entry 2118 // since compiled code callers expect the result to already be narrowed. 2119 case itos: __ narrow(R17_tos); /* fall through */ 2120 case ltos: 2121 case btos: 2122 case ztos: 2123 case ctos: 2124 case stos: 2125 case atos: __ mr(R3_RET, R17_tos); break; 2126 case ftos: 2127 case dtos: __ fmr(F1_RET, F15_ftos); break; 2128 case vtos: // This might be a constructor. Final fields (and volatile fields on PPC64) need 2129 // to get visible before the reference to the object gets stored anywhere. 2130 __ membar(Assembler::StoreStore); break; 2131 default : ShouldNotReachHere(); 2132 } 2133 __ blr(); 2134 } 2135 2136 // ============================================================================ 2137 // Constant pool cache access 2138 // 2139 // Memory ordering: 2140 // 2141 // Like done in C++ interpreter, we load the fields 2142 // - _indices 2143 // - _f12_oop 2144 // acquired, because these are asked if the cache is already resolved. We don't 2145 // want to float loads above this check. 
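// (The acquire guarding the resolved-check is realized below as a
// cmp-br-isync sequence rather than a full sync; see
// resolve_cache_and_index.)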
2146 // See also comments in ConstantPoolCacheEntry::bytecode_1(), 2147 // ConstantPoolCacheEntry::bytecode_2() and ConstantPoolCacheEntry::f1(); 2148 2149 // Call into the VM if call site is not yet resolved 2150 // 2151 // Input regs: 2152 // - None, all passed regs are outputs. 2153 // 2154 // Returns: 2155 // - Rcache: The const pool cache entry that contains the resolved result. 2156 // - Rresult: Either noreg or output for f1/f2. 2157 // 2158 // Kills: 2159 // - Rscratch 2160 void TemplateTable::resolve_cache_and_index(int byte_no, Register Rcache, Register Rscratch, size_t index_size) { 2161 2162 __ get_cache_and_index_at_bcp(Rcache, 1, index_size); 2163 Label Lresolved, Ldone; 2164 2165 assert(byte_no == f1_byte || byte_no == f2_byte, "byte_no out of range"); 2166 // We are resolved if the indices offset contains the current bytecode. 2167 #if defined(VM_LITTLE_ENDIAN) 2168 __ lbz(Rscratch, in_bytes(ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::indices_offset()) + byte_no + 1, Rcache); 2169 #else 2170 __ lbz(Rscratch, in_bytes(ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::indices_offset()) + 7 - (byte_no + 1), Rcache); 2171 #endif 2172 // Acquire by cmp-br-isync (see below). 2173 __ cmpdi(CCR0, Rscratch, (int)bytecode()); 2174 __ beq(CCR0, Lresolved); 2175 2176 address entry = NULL; 2177 switch (bytecode()) { 2178 case Bytecodes::_getstatic : // fall through 2179 case Bytecodes::_putstatic : // fall through 2180 case Bytecodes::_getfield : // fall through 2181 case Bytecodes::_putfield : entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_get_put); break; 2182 case Bytecodes::_invokevirtual : // fall through 2183 case Bytecodes::_invokespecial : // fall through 2184 case Bytecodes::_invokestatic : // fall through 2185 case Bytecodes::_invokeinterface: entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_invoke); break; 2186 case Bytecodes::_invokehandle : entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_invokehandle); break; 2187 case Bytecodes::_invokedynamic : entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_invokedynamic); break; 2188 default : ShouldNotReachHere(); break; 2189 } 2190 __ li(R4_ARG2, (int)bytecode()); 2191 __ call_VM(noreg, entry, R4_ARG2, true); 2192 2193 // Update registers with resolved info. 2194 __ get_cache_and_index_at_bcp(Rcache, 1, index_size); 2195 __ b(Ldone); 2196 2197 __ bind(Lresolved); 2198 __ isync(); // Order load wrt. succeeding loads. 2199 __ bind(Ldone); 2200 } 2201 2202 // Load the constant pool cache entry at field accesses into registers. 2203 // The Rcache and Rindex registers must be set before call. 
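// (Note: on PPC64 the cache entry is addressed via Rcache alone; the Rindex
// parameter is unused, as the signature below points out.)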
2204 // Input: 2205 // - Rcache, Rindex 2206 // Output: 2207 // - Robj, Roffset, Rflags 2208 void TemplateTable::load_field_cp_cache_entry(Register Robj, 2209 Register Rcache, 2210 Register Rindex /* unused on PPC64 */, 2211 Register Roffset, 2212 Register Rflags, 2213 bool is_static = false) { 2214 assert_different_registers(Rcache, Rflags, Roffset); 2215 // assert(Rindex == noreg, "parameter not used on PPC64"); 2216 2217 ByteSize cp_base_offset = ConstantPoolCache::base_offset(); 2218 __ ld(Rflags, in_bytes(cp_base_offset) + in_bytes(ConstantPoolCacheEntry::flags_offset()), Rcache); 2219 __ ld(Roffset, in_bytes(cp_base_offset) + in_bytes(ConstantPoolCacheEntry::f2_offset()), Rcache); 2220 if (is_static) { 2221 __ ld(Robj, in_bytes(cp_base_offset) + in_bytes(ConstantPoolCacheEntry::f1_offset()), Rcache); 2222 __ ld(Robj, in_bytes(Klass::java_mirror_offset()), Robj); 2223 // Acquire not needed here. Following access has an address dependency on this value. 2224 } 2225 } 2226 2227 // Load the constant pool cache entry at invokes into registers. 2228 // Resolve if necessary. 2229 2230 // Input Registers: 2231 // - None, bcp is used, though 2232 // 2233 // Return registers: 2234 // - Rmethod (f1 field or f2 if invokevirtual) 2235 // - Ritable_index (f2 field) 2236 // - Rflags (flags field) 2237 // 2238 // Kills: 2239 // - R21 2240 // 2241 void TemplateTable::load_invoke_cp_cache_entry(int byte_no, 2242 Register Rmethod, 2243 Register Ritable_index, 2244 Register Rflags, 2245 bool is_invokevirtual, 2246 bool is_invokevfinal, 2247 bool is_invokedynamic) { 2248 2249 ByteSize cp_base_offset = ConstantPoolCache::base_offset(); 2250 // Determine constant pool cache field offsets. 2251 assert(is_invokevirtual == (byte_no == f2_byte), "is_invokevirtual flag redundant"); 2252 const int method_offset = in_bytes(cp_base_offset + (is_invokevirtual ? ConstantPoolCacheEntry::f2_offset() : ConstantPoolCacheEntry::f1_offset())); 2253 const int flags_offset = in_bytes(cp_base_offset + ConstantPoolCacheEntry::flags_offset()); 2254 // Access constant pool cache fields. 2255 const int index_offset = in_bytes(cp_base_offset + ConstantPoolCacheEntry::f2_offset()); 2256 2257 Register Rcache = R21_tmp1; // Note: same register as R21_sender_SP. 2258 2259 if (is_invokevfinal) { 2260 assert(Ritable_index == noreg, "register not used"); 2261 // Already resolved. 2262 __ get_cache_and_index_at_bcp(Rcache, 1); 2263 } else { 2264 resolve_cache_and_index(byte_no, Rcache, R0, is_invokedynamic ? sizeof(u4) : sizeof(u2)); 2265 } 2266 2267 __ ld(Rmethod, method_offset, Rcache); 2268 __ ld(Rflags, flags_offset, Rcache); 2269 2270 if (Ritable_index != noreg) { 2271 __ ld(Ritable_index, index_offset, Rcache); 2272 } 2273 } 2274 2275 // ============================================================================ 2276 // Field access 2277 2278 // Volatile variables demand their effects be made known to all CPU's 2279 // in order. Store buffers on most chips allow reads & writes to 2280 // reorder; the JMM's ReadAfterWrite.java test fails in -Xint mode 2281 // without some kind of memory barrier (i.e., it's not sufficient that 2282 // the interpreter does not reorder volatile references, the hardware 2283 // also must not reorder them). 2284 // 2285 // According to the new Java Memory Model (JMM): 2286 // (1) All volatiles are serialized wrt to each other. 
ALSO reads &
// writes act as acquire & release, so:
// (2) A read cannot let unrelated NON-volatile memory refs that
//     happen after the read float up to before the read. It's OK for
//     non-volatile memory refs that happen before the volatile read to
//     float down below it.
// (3) Similarly, a volatile write cannot let unrelated NON-volatile
//     memory refs that happen BEFORE the write float down to after the
//     write. It's OK for non-volatile memory refs that happen after the
//     volatile write to float up before it.
//
// We only put in barriers around volatile refs (they are expensive),
// not _between_ memory refs (that would require us to track the
// flavor of the previous memory refs). Requirements (2) and (3)
// require some barriers before volatile stores and after volatile
// loads. These nearly cover requirement (1) but miss the
// volatile-store-volatile-load case. This final case is placed after
// volatile-stores although it could just as well go before
// volatile-loads.

// The cache and index registers are expected to be set before call.
// Correct values of the cache and index registers are preserved.
// Kills:
//   Rcache (if has_tos)
//   Rscratch
void TemplateTable::jvmti_post_field_access(Register Rcache, Register Rscratch, bool is_static, bool has_tos) {

  assert_different_registers(Rcache, Rscratch);

  if (JvmtiExport::can_post_field_access()) {
    ByteSize cp_base_offset = ConstantPoolCache::base_offset();
    Label Lno_field_access_post;

    // Check if posting of field-access events is enabled.
    int offs = __ load_const_optimized(Rscratch, JvmtiExport::get_field_access_count_addr(), R0, true);
    __ lwz(Rscratch, offs, Rscratch);

    __ cmpwi(CCR0, Rscratch, 0);
    __ beq(CCR0, Lno_field_access_post);

    // Post access enabled - do it!
    __ addi(Rcache, Rcache, in_bytes(cp_base_offset));
    if (is_static) {
      __ li(R17_tos, 0);
    } else {
      if (has_tos) {
        // The fast bytecode versions have obj ptr in register.
        // Thus, save the object pointer before call_VM() clobbers it:
        // put the object on tos where GC wants it.
        __ push_ptr(R17_tos);
      } else {
        // Load top of stack (do not pop the value off the stack).
        __ ld(R17_tos, Interpreter::expr_offset_in_bytes(0), R15_esp);
      }
      __ verify_oop(R17_tos);
    }
    // tos:   object pointer or NULL if static
    // cache: cache entry pointer
    __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_field_access), R17_tos, Rcache);
    if (!is_static && has_tos) {
      // Restore object pointer.
      __ pop_ptr(R17_tos);
      __ verify_oop(R17_tos);
    } else {
      // Cache is still needed to get class or obj.
      __ get_cache_and_index_at_bcp(Rcache, 1);
    }

    __ align(32, 12);
    __ bind(Lno_field_access_post);
  }
}

// kills R11_scratch1
void TemplateTable::pop_and_check_object(Register Roop) {
  Register Rtmp = R11_scratch1;

  assert_different_registers(Rtmp, Roop);
  __ pop_ptr(Roop);
  // For field access must check obj.
  __ null_check_throw(Roop, -1, Rtmp);
  __ verify_oop(Roop);
}

// PPC64: implement volatile loads as fence-load-acquire.
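// Schematically, the volatile variant of each load below is (a sketch of the
// pattern, not emitted verbatim for every type):
//
//   sync                     // fence: prior accesses complete first
//   l<type>x Rt, obj, off    // the volatile load itself
//   twi_0(Rt) or fcmpu       // dummy use, creates a dependency on Rt
//   isync                    // acquire: later accesses cannot float up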
2371 void TemplateTable::getfield_or_static(int byte_no, bool is_static) { 2372 transition(vtos, vtos); 2373 2374 Label Lacquire, Lisync; 2375 2376 const Register Rcache = R3_ARG1, 2377 Rclass_or_obj = R22_tmp2, 2378 Roffset = R23_tmp3, 2379 Rflags = R31, 2380 Rbtable = R5_ARG3, 2381 Rbc = R6_ARG4, 2382 Rscratch = R12_scratch2; 2383 2384 static address field_branch_table[number_of_states], 2385 static_branch_table[number_of_states]; 2386 2387 address* branch_table = is_static ? static_branch_table : field_branch_table; 2388 2389 // Get field offset. 2390 resolve_cache_and_index(byte_no, Rcache, Rscratch, sizeof(u2)); 2391 2392 // JVMTI support 2393 jvmti_post_field_access(Rcache, Rscratch, is_static, false); 2394 2395 // Load after possible GC. 2396 load_field_cp_cache_entry(Rclass_or_obj, Rcache, noreg, Roffset, Rflags, is_static); 2397 2398 // Load pointer to branch table. 2399 __ load_const_optimized(Rbtable, (address)branch_table, Rscratch); 2400 2401 // Get volatile flag. 2402 __ rldicl(Rscratch, Rflags, 64-ConstantPoolCacheEntry::is_volatile_shift, 63); // Extract volatile bit. 2403 // Note: sync is needed before volatile load on PPC64. 2404 2405 // Check field type. 2406 __ rldicl(Rflags, Rflags, 64-ConstantPoolCacheEntry::tos_state_shift, 64-ConstantPoolCacheEntry::tos_state_bits); 2407 2408 #ifdef ASSERT 2409 Label LFlagInvalid; 2410 __ cmpldi(CCR0, Rflags, number_of_states); 2411 __ bge(CCR0, LFlagInvalid); 2412 #endif 2413 2414 // Load from branch table and dispatch (volatile case: one instruction ahead). 2415 __ sldi(Rflags, Rflags, LogBytesPerWord); 2416 __ cmpwi(CCR6, Rscratch, 1); // Volatile? 2417 if (support_IRIW_for_not_multiple_copy_atomic_cpu) { 2418 __ sldi(Rscratch, Rscratch, exact_log2(BytesPerInstWord)); // Volatile ? size of 1 instruction : 0. 2419 } 2420 __ ldx(Rbtable, Rbtable, Rflags); 2421 2422 // Get the obj from stack. 2423 if (!is_static) { 2424 pop_and_check_object(Rclass_or_obj); // Kills R11_scratch1. 2425 } else { 2426 __ verify_oop(Rclass_or_obj); 2427 } 2428 2429 if (support_IRIW_for_not_multiple_copy_atomic_cpu) { 2430 __ subf(Rbtable, Rscratch, Rbtable); // Point to volatile/non-volatile entry point. 2431 } 2432 __ mtctr(Rbtable); 2433 __ bctr(); 2434 2435 #ifdef ASSERT 2436 __ bind(LFlagInvalid); 2437 __ stop("got invalid flag", 0x654); 2438 2439 // __ bind(Lvtos); 2440 address pc_before_fence = __ pc(); 2441 __ fence(); // Volatile entry point (one instruction before non-volatile_entry point). 2442 assert(__ pc() - pc_before_fence == (ptrdiff_t)BytesPerInstWord, "must be single instruction"); 2443 assert(branch_table[vtos] == 0, "can't compute twice"); 2444 branch_table[vtos] = __ pc(); // non-volatile_entry point 2445 __ stop("vtos unexpected", 0x655); 2446 #endif 2447 2448 __ align(32, 28, 28); // Align load. 2449 // __ bind(Ldtos); 2450 __ fence(); // Volatile entry point (one instruction before non-volatile_entry point). 2451 assert(branch_table[dtos] == 0, "can't compute twice"); 2452 branch_table[dtos] = __ pc(); // non-volatile_entry point 2453 __ lfdx(F15_ftos, Rclass_or_obj, Roffset); 2454 __ push(dtos); 2455 if (!is_static) patch_bytecode(Bytecodes::_fast_dgetfield, Rbc, Rscratch); 2456 { 2457 Label acquire_double; 2458 __ beq(CCR6, acquire_double); // Volatile? 2459 __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode())); 2460 2461 __ bind(acquire_double); 2462 __ fcmpu(CCR0, F15_ftos, F15_ftos); // Acquire by cmp-br-isync. 2463 __ beq_predict_taken(CCR0, Lisync); 2464 __ b(Lisync); // In case of NAN. 
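      // (The fcmpu/branch/isync sequence gives acquire semantics for FP
      //  loads: the compare depends on the loaded value, so isync after a
      //  branch on that compare orders all later accesses behind the load.
      //  The extra unconditional branch covers the unordered, i.e. NaN, case.)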
2465 } 2466 2467 __ align(32, 28, 28); // Align load. 2468 // __ bind(Lftos); 2469 __ fence(); // Volatile entry point (one instruction before non-volatile_entry point). 2470 assert(branch_table[ftos] == 0, "can't compute twice"); 2471 branch_table[ftos] = __ pc(); // non-volatile_entry point 2472 __ lfsx(F15_ftos, Rclass_or_obj, Roffset); 2473 __ push(ftos); 2474 if (!is_static) { patch_bytecode(Bytecodes::_fast_fgetfield, Rbc, Rscratch); } 2475 { 2476 Label acquire_float; 2477 __ beq(CCR6, acquire_float); // Volatile? 2478 __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode())); 2479 2480 __ bind(acquire_float); 2481 __ fcmpu(CCR0, F15_ftos, F15_ftos); // Acquire by cmp-br-isync. 2482 __ beq_predict_taken(CCR0, Lisync); 2483 __ b(Lisync); // In case of NAN. 2484 } 2485 2486 __ align(32, 28, 28); // Align load. 2487 // __ bind(Litos); 2488 __ fence(); // Volatile entry point (one instruction before non-volatile_entry point). 2489 assert(branch_table[itos] == 0, "can't compute twice"); 2490 branch_table[itos] = __ pc(); // non-volatile_entry point 2491 __ lwax(R17_tos, Rclass_or_obj, Roffset); 2492 __ push(itos); 2493 if (!is_static) patch_bytecode(Bytecodes::_fast_igetfield, Rbc, Rscratch); 2494 __ beq(CCR6, Lacquire); // Volatile? 2495 __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode())); 2496 2497 __ align(32, 28, 28); // Align load. 2498 // __ bind(Lltos); 2499 __ fence(); // Volatile entry point (one instruction before non-volatile_entry point). 2500 assert(branch_table[ltos] == 0, "can't compute twice"); 2501 branch_table[ltos] = __ pc(); // non-volatile_entry point 2502 __ ldx(R17_tos, Rclass_or_obj, Roffset); 2503 __ push(ltos); 2504 if (!is_static) patch_bytecode(Bytecodes::_fast_lgetfield, Rbc, Rscratch); 2505 __ beq(CCR6, Lacquire); // Volatile? 2506 __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode())); 2507 2508 __ align(32, 28, 28); // Align load. 2509 // __ bind(Lbtos); 2510 __ fence(); // Volatile entry point (one instruction before non-volatile_entry point). 2511 assert(branch_table[btos] == 0, "can't compute twice"); 2512 branch_table[btos] = __ pc(); // non-volatile_entry point 2513 __ lbzx(R17_tos, Rclass_or_obj, Roffset); 2514 __ extsb(R17_tos, R17_tos); 2515 __ push(btos); 2516 if (!is_static) patch_bytecode(Bytecodes::_fast_bgetfield, Rbc, Rscratch); 2517 __ beq(CCR6, Lacquire); // Volatile? 2518 __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode())); 2519 2520 __ align(32, 28, 28); // Align load. 2521 // __ bind(Lztos); (same code as btos) 2522 __ fence(); // Volatile entry point (one instruction before non-volatile_entry point). 2523 assert(branch_table[ztos] == 0, "can't compute twice"); 2524 branch_table[ztos] = __ pc(); // non-volatile_entry point 2525 __ lbzx(R17_tos, Rclass_or_obj, Roffset); 2526 __ extsb(R17_tos, R17_tos); 2527 __ push(ztos); 2528 if (!is_static) { 2529 // use btos rewriting, no truncating to t/f bit is needed for getfield. 2530 patch_bytecode(Bytecodes::_fast_bgetfield, Rbc, Rscratch); 2531 } 2532 __ beq(CCR6, Lacquire); // Volatile? 2533 __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode())); 2534 2535 __ align(32, 28, 28); // Align load. 2536 // __ bind(Lctos); 2537 __ fence(); // Volatile entry point (one instruction before non-volatile_entry point). 
  assert(branch_table[ctos] == 0, "can't compute twice");
  branch_table[ctos] = __ pc(); // non-volatile_entry point
  __ lhzx(R17_tos, Rclass_or_obj, Roffset);
  __ push(ctos);
  if (!is_static) patch_bytecode(Bytecodes::_fast_cgetfield, Rbc, Rscratch);
  __ beq(CCR6, Lacquire); // Volatile?
  __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode()));

  __ align(32, 28, 28); // Align load.
  // __ bind(Lstos);
  __ fence(); // Volatile entry point (one instruction before non-volatile_entry point).
  assert(branch_table[stos] == 0, "can't compute twice");
  branch_table[stos] = __ pc(); // non-volatile_entry point
  __ lhax(R17_tos, Rclass_or_obj, Roffset);
  __ push(stos);
  if (!is_static) patch_bytecode(Bytecodes::_fast_sgetfield, Rbc, Rscratch);
  __ beq(CCR6, Lacquire); // Volatile?
  __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode()));

  __ align(32, 28, 28); // Align load.
  // __ bind(Latos);
  __ fence(); // Volatile entry point (one instruction before non-volatile_entry point).
  assert(branch_table[atos] == 0, "can't compute twice");
  branch_table[atos] = __ pc(); // non-volatile_entry point
  __ load_heap_oop(R17_tos, (RegisterOrConstant)Roffset, Rclass_or_obj);
  __ verify_oop(R17_tos);
  __ push(atos);
  //__ dcbt(R17_tos); // prefetch
  if (!is_static) patch_bytecode(Bytecodes::_fast_agetfield, Rbc, Rscratch);
  __ beq(CCR6, Lacquire); // Volatile?
  __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode()));

  __ align(32, 12);
  __ bind(Lacquire);
  __ twi_0(R17_tos);
  __ bind(Lisync);
  __ isync(); // acquire

#ifdef ASSERT
  for (int i = 0; i < number_of_states; ++i) {
    assert(branch_table[i], "get initialization");
    //tty->print_cr("get: %s_branch_table[%d] = 0x%llx (opcode 0x%llx)",
    //              is_static ? "static" : "field", i, branch_table[i], *((unsigned int*)branch_table[i]));
  }
#endif
}

void TemplateTable::getfield(int byte_no) {
  getfield_or_static(byte_no, false);
}

void TemplateTable::getstatic(int byte_no) {
  getfield_or_static(byte_no, true);
}

// The cache and index registers are expected to be set before call.
// The function may destroy various registers, just not the cache and index registers.
void TemplateTable::jvmti_post_field_mod(Register Rcache, Register Rscratch, bool is_static) {

  assert_different_registers(Rcache, Rscratch, R6_ARG4);

  if (JvmtiExport::can_post_field_modification()) {
    Label Lno_field_mod_post;

    // Check if posting of field-modification events is enabled.
    int offs = __ load_const_optimized(Rscratch, JvmtiExport::get_field_modification_count_addr(), R0, true);
    __ lwz(Rscratch, offs, Rscratch);

    __ cmpwi(CCR0, Rscratch, 0);
    __ beq(CCR0, Lno_field_mod_post);

    // Do the post
    ByteSize cp_base_offset = ConstantPoolCache::base_offset();
    const Register Robj = Rscratch;

    __ addi(Rcache, Rcache, in_bytes(cp_base_offset));
    if (is_static) {
      // Life is simple. Null out the object pointer.
      __ li(Robj, 0);
    } else {
      // In case of the fast versions, value lives in registers => put it back on tos.
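      // (Each push_* below moves esp by one or two slots; 'offs' is bumped
      //  by the same amount so that base + offs still addresses the object
      //  reference sitting underneath the re-pushed value.)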
2619 int offs = Interpreter::expr_offset_in_bytes(0); 2620 Register base = R15_esp; 2621 switch(bytecode()) { 2622 case Bytecodes::_fast_aputfield: __ push_ptr(); offs+= Interpreter::stackElementSize; break; 2623 case Bytecodes::_fast_iputfield: // Fall through 2624 case Bytecodes::_fast_bputfield: // Fall through 2625 case Bytecodes::_fast_zputfield: // Fall through 2626 case Bytecodes::_fast_cputfield: // Fall through 2627 case Bytecodes::_fast_sputfield: __ push_i(); offs+= Interpreter::stackElementSize; break; 2628 case Bytecodes::_fast_lputfield: __ push_l(); offs+=2*Interpreter::stackElementSize; break; 2629 case Bytecodes::_fast_fputfield: __ push_f(); offs+= Interpreter::stackElementSize; break; 2630 case Bytecodes::_fast_dputfield: __ push_d(); offs+=2*Interpreter::stackElementSize; break; 2631 default: { 2632 offs = 0; 2633 base = Robj; 2634 const Register Rflags = Robj; 2635 Label is_one_slot; 2636 // Life is harder. The stack holds the value on top, followed by the 2637 // object. We don't know the size of the value, though; it could be 2638 // one or two words depending on its type. As a result, we must find 2639 // the type to determine where the object is. 2640 __ ld(Rflags, in_bytes(ConstantPoolCacheEntry::flags_offset()), Rcache); // Big Endian 2641 __ rldicl(Rflags, Rflags, 64-ConstantPoolCacheEntry::tos_state_shift, 64-ConstantPoolCacheEntry::tos_state_bits); 2642 2643 __ cmpwi(CCR0, Rflags, ltos); 2644 __ cmpwi(CCR1, Rflags, dtos); 2645 __ addi(base, R15_esp, Interpreter::expr_offset_in_bytes(1)); 2646 __ crnor(/*CR0 eq*/2, /*CR1 eq*/4+2, /*CR0 eq*/2); 2647 __ beq(CCR0, is_one_slot); 2648 __ addi(base, R15_esp, Interpreter::expr_offset_in_bytes(2)); 2649 __ bind(is_one_slot); 2650 break; 2651 } 2652 } 2653 __ ld(Robj, offs, base); 2654 __ verify_oop(Robj); 2655 } 2656 2657 __ addi(R6_ARG4, R15_esp, Interpreter::expr_offset_in_bytes(0)); 2658 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_field_modification), Robj, Rcache, R6_ARG4); 2659 __ get_cache_and_index_at_bcp(Rcache, 1); 2660 2661 // In case of the fast versions, value lives in registers => put it back on tos. 2662 switch(bytecode()) { 2663 case Bytecodes::_fast_aputfield: __ pop_ptr(); break; 2664 case Bytecodes::_fast_iputfield: // Fall through 2665 case Bytecodes::_fast_bputfield: // Fall through 2666 case Bytecodes::_fast_zputfield: // Fall through 2667 case Bytecodes::_fast_cputfield: // Fall through 2668 case Bytecodes::_fast_sputfield: __ pop_i(); break; 2669 case Bytecodes::_fast_lputfield: __ pop_l(); break; 2670 case Bytecodes::_fast_fputfield: __ pop_f(); break; 2671 case Bytecodes::_fast_dputfield: __ pop_d(); break; 2672 default: break; // Nothin' to do. 2673 } 2674 2675 __ align(32, 12); 2676 __ bind(Lno_field_mod_post); 2677 } 2678 } 2679 2680 // PPC64: implement volatile stores as release-store (return bytecode contains an additional release). 2681 void TemplateTable::putfield_or_static(int byte_no, bool is_static) { 2682 Label Lvolatile; 2683 2684 const Register Rcache = R5_ARG3, // Do not use ARG1/2 (causes trouble in jvmti_post_field_mod). 2685 Rclass_or_obj = R31, // Needs to survive C call. 2686 Roffset = R22_tmp2, // Needs to survive C call. 2687 Rflags = R3_ARG1, 2688 Rbtable = R4_ARG2, 2689 Rscratch = R11_scratch1, 2690 Rscratch2 = R12_scratch2, 2691 Rscratch3 = R6_ARG4, 2692 Rbc = Rscratch3; 2693 const ConditionRegister CR_is_vol = CCR2; // Non-volatile condition register (survives runtime call in do_oop_store). 
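// Dispatch below goes through a per-tos-state branch table: each table entry
// points at a non-volatile entry point whose volatile variant starts exactly
// one instruction (a release) earlier, so selecting the variant is a single
// subtract of the scaled volatile bit from the fetched address (see the
// ldx/subf/mtctr/bctr sequence that follows).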
2694 2695 static address field_branch_table[number_of_states], 2696 static_branch_table[number_of_states]; 2697 2698 address* branch_table = is_static ? static_branch_table : field_branch_table; 2699 2700 // Stack (grows up): 2701 // value 2702 // obj 2703 2704 // Load the field offset. 2705 resolve_cache_and_index(byte_no, Rcache, Rscratch, sizeof(u2)); 2706 jvmti_post_field_mod(Rcache, Rscratch, is_static); 2707 load_field_cp_cache_entry(Rclass_or_obj, Rcache, noreg, Roffset, Rflags, is_static); 2708 2709 // Load pointer to branch table. 2710 __ load_const_optimized(Rbtable, (address)branch_table, Rscratch); 2711 2712 // Get volatile flag. 2713 __ rldicl(Rscratch, Rflags, 64-ConstantPoolCacheEntry::is_volatile_shift, 63); // Extract volatile bit. 2714 2715 // Check the field type. 2716 __ rldicl(Rflags, Rflags, 64-ConstantPoolCacheEntry::tos_state_shift, 64-ConstantPoolCacheEntry::tos_state_bits); 2717 2718 #ifdef ASSERT 2719 Label LFlagInvalid; 2720 __ cmpldi(CCR0, Rflags, number_of_states); 2721 __ bge(CCR0, LFlagInvalid); 2722 #endif 2723 2724 // Load from branch table and dispatch (volatile case: one instruction ahead). 2725 __ sldi(Rflags, Rflags, LogBytesPerWord); 2726 if (!support_IRIW_for_not_multiple_copy_atomic_cpu) { __ cmpwi(CR_is_vol, Rscratch, 1); } // Volatile? 2727 __ sldi(Rscratch, Rscratch, exact_log2(BytesPerInstWord)); // Volatile? size of instruction 1 : 0. 2728 __ ldx(Rbtable, Rbtable, Rflags); 2729 2730 __ subf(Rbtable, Rscratch, Rbtable); // Point to volatile/non-volatile entry point. 2731 __ mtctr(Rbtable); 2732 __ bctr(); 2733 2734 #ifdef ASSERT 2735 __ bind(LFlagInvalid); 2736 __ stop("got invalid flag", 0x656); 2737 2738 // __ bind(Lvtos); 2739 address pc_before_release = __ pc(); 2740 __ release(); // Volatile entry point (one instruction before non-volatile_entry point). 2741 assert(__ pc() - pc_before_release == (ptrdiff_t)BytesPerInstWord, "must be single instruction"); 2742 assert(branch_table[vtos] == 0, "can't compute twice"); 2743 branch_table[vtos] = __ pc(); // non-volatile_entry point 2744 __ stop("vtos unexpected", 0x657); 2745 #endif 2746 2747 __ align(32, 28, 28); // Align pop. 2748 // __ bind(Ldtos); 2749 __ release(); // Volatile entry point (one instruction before non-volatile_entry point). 2750 assert(branch_table[dtos] == 0, "can't compute twice"); 2751 branch_table[dtos] = __ pc(); // non-volatile_entry point 2752 __ pop(dtos); 2753 if (!is_static) { pop_and_check_object(Rclass_or_obj); } // Kills R11_scratch1. 2754 __ stfdx(F15_ftos, Rclass_or_obj, Roffset); 2755 if (!is_static) { patch_bytecode(Bytecodes::_fast_dputfield, Rbc, Rscratch, true, byte_no); } 2756 if (!support_IRIW_for_not_multiple_copy_atomic_cpu) { 2757 __ beq(CR_is_vol, Lvolatile); // Volatile? 2758 } 2759 __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode())); 2760 2761 __ align(32, 28, 28); // Align pop. 2762 // __ bind(Lftos); 2763 __ release(); // Volatile entry point (one instruction before non-volatile_entry point). 2764 assert(branch_table[ftos] == 0, "can't compute twice"); 2765 branch_table[ftos] = __ pc(); // non-volatile_entry point 2766 __ pop(ftos); 2767 if (!is_static) { pop_and_check_object(Rclass_or_obj); } // Kills R11_scratch1. 2768 __ stfsx(F15_ftos, Rclass_or_obj, Roffset); 2769 if (!is_static) { patch_bytecode(Bytecodes::_fast_fputfield, Rbc, Rscratch, true, byte_no); } 2770 if (!support_IRIW_for_not_multiple_copy_atomic_cpu) { 2771 __ beq(CR_is_vol, Lvolatile); // Volatile? 
2772 } 2773 __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode())); 2774 2775 __ align(32, 28, 28); // Align pop. 2776 // __ bind(Litos); 2777 __ release(); // Volatile entry point (one instruction before non-volatile_entry point). 2778 assert(branch_table[itos] == 0, "can't compute twice"); 2779 branch_table[itos] = __ pc(); // non-volatile_entry point 2780 __ pop(itos); 2781 if (!is_static) { pop_and_check_object(Rclass_or_obj); } // Kills R11_scratch1. 2782 __ stwx(R17_tos, Rclass_or_obj, Roffset); 2783 if (!is_static) { patch_bytecode(Bytecodes::_fast_iputfield, Rbc, Rscratch, true, byte_no); } 2784 if (!support_IRIW_for_not_multiple_copy_atomic_cpu) { 2785 __ beq(CR_is_vol, Lvolatile); // Volatile? 2786 } 2787 __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode())); 2788 2789 __ align(32, 28, 28); // Align pop. 2790 // __ bind(Lltos); 2791 __ release(); // Volatile entry point (one instruction before non-volatile_entry point). 2792 assert(branch_table[ltos] == 0, "can't compute twice"); 2793 branch_table[ltos] = __ pc(); // non-volatile_entry point 2794 __ pop(ltos); 2795 if (!is_static) { pop_and_check_object(Rclass_or_obj); } // Kills R11_scratch1. 2796 __ stdx(R17_tos, Rclass_or_obj, Roffset); 2797 if (!is_static) { patch_bytecode(Bytecodes::_fast_lputfield, Rbc, Rscratch, true, byte_no); } 2798 if (!support_IRIW_for_not_multiple_copy_atomic_cpu) { 2799 __ beq(CR_is_vol, Lvolatile); // Volatile? 2800 } 2801 __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode())); 2802 2803 __ align(32, 28, 28); // Align pop. 2804 // __ bind(Lbtos); 2805 __ release(); // Volatile entry point (one instruction before non-volatile_entry point). 2806 assert(branch_table[btos] == 0, "can't compute twice"); 2807 branch_table[btos] = __ pc(); // non-volatile_entry point 2808 __ pop(btos); 2809 if (!is_static) { pop_and_check_object(Rclass_or_obj); } // Kills R11_scratch1. 2810 __ stbx(R17_tos, Rclass_or_obj, Roffset); 2811 if (!is_static) { patch_bytecode(Bytecodes::_fast_bputfield, Rbc, Rscratch, true, byte_no); } 2812 if (!support_IRIW_for_not_multiple_copy_atomic_cpu) { 2813 __ beq(CR_is_vol, Lvolatile); // Volatile? 2814 } 2815 __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode())); 2816 2817 __ align(32, 28, 28); // Align pop. 2818 // __ bind(Lztos); 2819 __ release(); // Volatile entry point (one instruction before non-volatile_entry point). 2820 assert(branch_table[ztos] == 0, "can't compute twice"); 2821 branch_table[ztos] = __ pc(); // non-volatile_entry point 2822 __ pop(ztos); 2823 if (!is_static) { pop_and_check_object(Rclass_or_obj); } // Kills R11_scratch1. 2824 __ andi(R17_tos, R17_tos, 0x1); 2825 __ stbx(R17_tos, Rclass_or_obj, Roffset); 2826 if (!is_static) { patch_bytecode(Bytecodes::_fast_zputfield, Rbc, Rscratch, true, byte_no); } 2827 if (!support_IRIW_for_not_multiple_copy_atomic_cpu) { 2828 __ beq(CR_is_vol, Lvolatile); // Volatile? 2829 } 2830 __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode())); 2831 2832 __ align(32, 28, 28); // Align pop. 2833 // __ bind(Lctos); 2834 __ release(); // Volatile entry point (one instruction before non-volatile_entry point). 2835 assert(branch_table[ctos] == 0, "can't compute twice"); 2836 branch_table[ctos] = __ pc(); // non-volatile_entry point 2837 __ pop(ctos); 2838 if (!is_static) { pop_and_check_object(Rclass_or_obj); } // Kills R11_scratch1.. 
2839 __ sthx(R17_tos, Rclass_or_obj, Roffset); 2840 if (!is_static) { patch_bytecode(Bytecodes::_fast_cputfield, Rbc, Rscratch, true, byte_no); } 2841 if (!support_IRIW_for_not_multiple_copy_atomic_cpu) { 2842 __ beq(CR_is_vol, Lvolatile); // Volatile? 2843 } 2844 __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode())); 2845 2846 __ align(32, 28, 28); // Align pop. 2847 // __ bind(Lstos); 2848 __ release(); // Volatile entry point (one instruction before non-volatile_entry point). 2849 assert(branch_table[stos] == 0, "can't compute twice"); 2850 branch_table[stos] = __ pc(); // non-volatile_entry point 2851 __ pop(stos); 2852 if (!is_static) { pop_and_check_object(Rclass_or_obj); } // Kills R11_scratch1. 2853 __ sthx(R17_tos, Rclass_or_obj, Roffset); 2854 if (!is_static) { patch_bytecode(Bytecodes::_fast_sputfield, Rbc, Rscratch, true, byte_no); } 2855 if (!support_IRIW_for_not_multiple_copy_atomic_cpu) { 2856 __ beq(CR_is_vol, Lvolatile); // Volatile? 2857 } 2858 __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode())); 2859 2860 __ align(32, 28, 28); // Align pop. 2861 // __ bind(Latos); 2862 __ release(); // Volatile entry point (one instruction before non-volatile_entry point). 2863 assert(branch_table[atos] == 0, "can't compute twice"); 2864 branch_table[atos] = __ pc(); // non-volatile_entry point 2865 __ pop(atos); 2866 if (!is_static) { pop_and_check_object(Rclass_or_obj); } // kills R11_scratch1 2867 do_oop_store(_masm, Rclass_or_obj, Roffset, R17_tos, Rscratch, Rscratch2, Rscratch3, _bs->kind(), false /* precise */, true /* check null */); 2868 if (!is_static) { patch_bytecode(Bytecodes::_fast_aputfield, Rbc, Rscratch, true, byte_no); } 2869 if (!support_IRIW_for_not_multiple_copy_atomic_cpu) { 2870 __ beq(CR_is_vol, Lvolatile); // Volatile? 2871 __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode())); 2872 2873 __ align(32, 12); 2874 __ bind(Lvolatile); 2875 __ fence(); 2876 } 2877 // fallthru: __ b(Lexit); 2878 2879 #ifdef ASSERT 2880 for (int i = 0; i<number_of_states; ++i) { 2881 assert(branch_table[i], "put initialization"); 2882 //tty->print_cr("put: %s_branch_table[%d] = 0x%llx (opcode 0x%llx)", 2883 // is_static ? "static" : "field", i, branch_table[i], *((unsigned int*)branch_table[i])); 2884 } 2885 #endif 2886 } 2887 2888 void TemplateTable::putfield(int byte_no) { 2889 putfield_or_static(byte_no, false); 2890 } 2891 2892 void TemplateTable::putstatic(int byte_no) { 2893 putfield_or_static(byte_no, true); 2894 } 2895 2896 // See SPARC. On PPC64, we have a different jvmti_post_field_mod which does the job. 2897 void TemplateTable::jvmti_post_fast_field_mod() { 2898 __ should_not_reach_here(); 2899 } 2900 2901 void TemplateTable::fast_storefield(TosState state) { 2902 transition(state, vtos); 2903 2904 const Register Rcache = R5_ARG3, // Do not use ARG1/2 (causes trouble in jvmti_post_field_mod). 2905 Rclass_or_obj = R31, // Needs to survive C call. 2906 Roffset = R22_tmp2, // Needs to survive C call. 2907 Rflags = R3_ARG1, 2908 Rscratch = R11_scratch1, 2909 Rscratch2 = R12_scratch2, 2910 Rscratch3 = R4_ARG2; 2911 const ConditionRegister CR_is_vol = CCR2; // Non-volatile condition register (survives runtime call in do_oop_store). 2912 2913 // Constant pool already resolved => Load flags and offset of field. 2914 __ get_cache_and_index_at_bcp(Rcache, 1); 2915 jvmti_post_field_mod(Rcache, Rscratch, false /* not static */); 2916 load_field_cp_cache_entry(noreg, Rcache, noreg, Roffset, Rflags, false); 2917 2918 // Get the obj and the final store addr. 
2919 pop_and_check_object(Rclass_or_obj); // Kills R11_scratch1. 2920 2921 // Get volatile flag. 2922 __ rldicl_(Rscratch, Rflags, 64-ConstantPoolCacheEntry::is_volatile_shift, 63); // Extract volatile bit. 2923 if (!support_IRIW_for_not_multiple_copy_atomic_cpu) { __ cmpdi(CR_is_vol, Rscratch, 1); } 2924 { 2925 Label LnotVolatile; 2926 __ beq(CCR0, LnotVolatile); 2927 __ release(); 2928 __ align(32, 12); 2929 __ bind(LnotVolatile); 2930 } 2931 2932 // Do the store and fencing. 2933 switch(bytecode()) { 2934 case Bytecodes::_fast_aputfield: 2935 // Store into the field. 2936 do_oop_store(_masm, Rclass_or_obj, Roffset, R17_tos, Rscratch, Rscratch2, Rscratch3, _bs->kind(), false /* precise */, true /* check null */); 2937 break; 2938 2939 case Bytecodes::_fast_iputfield: 2940 __ stwx(R17_tos, Rclass_or_obj, Roffset); 2941 break; 2942 2943 case Bytecodes::_fast_lputfield: 2944 __ stdx(R17_tos, Rclass_or_obj, Roffset); 2945 break; 2946 2947 case Bytecodes::_fast_zputfield: 2948 __ andi(R17_tos, R17_tos, 0x1); // boolean is true if LSB is 1 2949 // fall through to bputfield 2950 case Bytecodes::_fast_bputfield: 2951 __ stbx(R17_tos, Rclass_or_obj, Roffset); 2952 break; 2953 2954 case Bytecodes::_fast_cputfield: 2955 case Bytecodes::_fast_sputfield: 2956 __ sthx(R17_tos, Rclass_or_obj, Roffset); 2957 break; 2958 2959 case Bytecodes::_fast_fputfield: 2960 __ stfsx(F15_ftos, Rclass_or_obj, Roffset); 2961 break; 2962 2963 case Bytecodes::_fast_dputfield: 2964 __ stfdx(F15_ftos, Rclass_or_obj, Roffset); 2965 break; 2966 2967 default: ShouldNotReachHere(); 2968 } 2969 2970 if (!support_IRIW_for_not_multiple_copy_atomic_cpu) { 2971 Label LVolatile; 2972 __ beq(CR_is_vol, LVolatile); 2973 __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode())); 2974 2975 __ align(32, 12); 2976 __ bind(LVolatile); 2977 __ fence(); 2978 } 2979 } 2980 2981 void TemplateTable::fast_accessfield(TosState state) { 2982 transition(atos, state); 2983 2984 Label LisVolatile; 2985 ByteSize cp_base_offset = ConstantPoolCache::base_offset(); 2986 2987 const Register Rcache = R3_ARG1, 2988 Rclass_or_obj = R17_tos, 2989 Roffset = R22_tmp2, 2990 Rflags = R23_tmp3, 2991 Rscratch = R12_scratch2; 2992 2993 // Constant pool already resolved. Get the field offset. 2994 __ get_cache_and_index_at_bcp(Rcache, 1); 2995 load_field_cp_cache_entry(noreg, Rcache, noreg, Roffset, Rflags, false); 2996 2997 // JVMTI support 2998 jvmti_post_field_access(Rcache, Rscratch, false, true); 2999 3000 // Get the load address. 3001 __ null_check_throw(Rclass_or_obj, -1, Rscratch); 3002 3003 // Get volatile flag. 3004 __ rldicl_(Rscratch, Rflags, 64-ConstantPoolCacheEntry::is_volatile_shift, 63); // Extract volatile bit. 
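  // (rldicl_ is the record form, so it also sets CR0; the bne below can test
  //  the extracted volatile bit without a separate compare.)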
3005 __ bne(CCR0, LisVolatile); 3006 3007 switch(bytecode()) { 3008 case Bytecodes::_fast_agetfield: 3009 { 3010 __ load_heap_oop(R17_tos, (RegisterOrConstant)Roffset, Rclass_or_obj); 3011 __ verify_oop(R17_tos); 3012 __ dispatch_epilog(state, Bytecodes::length_for(bytecode())); 3013 3014 __ bind(LisVolatile); 3015 if (support_IRIW_for_not_multiple_copy_atomic_cpu) { __ fence(); } 3016 __ load_heap_oop(R17_tos, (RegisterOrConstant)Roffset, Rclass_or_obj); 3017 __ verify_oop(R17_tos); 3018 __ twi_0(R17_tos); 3019 __ isync(); 3020 break; 3021 } 3022 case Bytecodes::_fast_igetfield: 3023 { 3024 __ lwax(R17_tos, Rclass_or_obj, Roffset); 3025 __ dispatch_epilog(state, Bytecodes::length_for(bytecode())); 3026 3027 __ bind(LisVolatile); 3028 if (support_IRIW_for_not_multiple_copy_atomic_cpu) { __ fence(); } 3029 __ lwax(R17_tos, Rclass_or_obj, Roffset); 3030 __ twi_0(R17_tos); 3031 __ isync(); 3032 break; 3033 } 3034 case Bytecodes::_fast_lgetfield: 3035 { 3036 __ ldx(R17_tos, Rclass_or_obj, Roffset); 3037 __ dispatch_epilog(state, Bytecodes::length_for(bytecode())); 3038 3039 __ bind(LisVolatile); 3040 if (support_IRIW_for_not_multiple_copy_atomic_cpu) { __ fence(); } 3041 __ ldx(R17_tos, Rclass_or_obj, Roffset); 3042 __ twi_0(R17_tos); 3043 __ isync(); 3044 break; 3045 } 3046 case Bytecodes::_fast_bgetfield: 3047 { 3048 __ lbzx(R17_tos, Rclass_or_obj, Roffset); 3049 __ extsb(R17_tos, R17_tos); 3050 __ dispatch_epilog(state, Bytecodes::length_for(bytecode())); 3051 3052 __ bind(LisVolatile); 3053 if (support_IRIW_for_not_multiple_copy_atomic_cpu) { __ fence(); } 3054 __ lbzx(R17_tos, Rclass_or_obj, Roffset); 3055 __ twi_0(R17_tos); 3056 __ extsb(R17_tos, R17_tos); 3057 __ isync(); 3058 break; 3059 } 3060 case Bytecodes::_fast_cgetfield: 3061 { 3062 __ lhzx(R17_tos, Rclass_or_obj, Roffset); 3063 __ dispatch_epilog(state, Bytecodes::length_for(bytecode())); 3064 3065 __ bind(LisVolatile); 3066 if (support_IRIW_for_not_multiple_copy_atomic_cpu) { __ fence(); } 3067 __ lhzx(R17_tos, Rclass_or_obj, Roffset); 3068 __ twi_0(R17_tos); 3069 __ isync(); 3070 break; 3071 } 3072 case Bytecodes::_fast_sgetfield: 3073 { 3074 __ lhax(R17_tos, Rclass_or_obj, Roffset); 3075 __ dispatch_epilog(state, Bytecodes::length_for(bytecode())); 3076 3077 __ bind(LisVolatile); 3078 if (support_IRIW_for_not_multiple_copy_atomic_cpu) { __ fence(); } 3079 __ lhax(R17_tos, Rclass_or_obj, Roffset); 3080 __ twi_0(R17_tos); 3081 __ isync(); 3082 break; 3083 } 3084 case Bytecodes::_fast_fgetfield: 3085 { 3086 __ lfsx(F15_ftos, Rclass_or_obj, Roffset); 3087 __ dispatch_epilog(state, Bytecodes::length_for(bytecode())); 3088 3089 __ bind(LisVolatile); 3090 Label Ldummy; 3091 if (support_IRIW_for_not_multiple_copy_atomic_cpu) { __ fence(); } 3092 __ lfsx(F15_ftos, Rclass_or_obj, Roffset); 3093 __ fcmpu(CCR0, F15_ftos, F15_ftos); // Acquire by cmp-br-isync. 3094 __ bne_predict_not_taken(CCR0, Ldummy); 3095 __ bind(Ldummy); 3096 __ isync(); 3097 break; 3098 } 3099 case Bytecodes::_fast_dgetfield: 3100 { 3101 __ lfdx(F15_ftos, Rclass_or_obj, Roffset); 3102 __ dispatch_epilog(state, Bytecodes::length_for(bytecode())); 3103 3104 __ bind(LisVolatile); 3105 Label Ldummy; 3106 if (support_IRIW_for_not_multiple_copy_atomic_cpu) { __ fence(); } 3107 __ lfdx(F15_ftos, Rclass_or_obj, Roffset); 3108 __ fcmpu(CCR0, F15_ftos, F15_ftos); // Acquire by cmp-br-isync. 
3109 __ bne_predict_not_taken(CCR0, Ldummy); 3110 __ bind(Ldummy); 3111 __ isync(); 3112 break; 3113 } 3114 default: ShouldNotReachHere(); 3115 } 3116 } 3117 3118 void TemplateTable::fast_xaccess(TosState state) { 3119 transition(vtos, state); 3120 3121 Label LisVolatile; 3122 ByteSize cp_base_offset = ConstantPoolCache::base_offset(); 3123 const Register Rcache = R3_ARG1, 3124 Rclass_or_obj = R17_tos, 3125 Roffset = R22_tmp2, 3126 Rflags = R23_tmp3, 3127 Rscratch = R12_scratch2; 3128 3129 __ ld(Rclass_or_obj, 0, R18_locals); 3130 3131 // Constant pool already resolved. Get the field offset. 3132 __ get_cache_and_index_at_bcp(Rcache, 2); 3133 load_field_cp_cache_entry(noreg, Rcache, noreg, Roffset, Rflags, false); 3134 3135 // JVMTI support not needed, since we switch back to single bytecode as soon as debugger attaches. 3136 3137 // Needed to report exception at the correct bcp. 3138 __ addi(R14_bcp, R14_bcp, 1); 3139 3140 // Get the load address. 3141 __ null_check_throw(Rclass_or_obj, -1, Rscratch); 3142 3143 // Get volatile flag. 3144 __ rldicl_(Rscratch, Rflags, 64-ConstantPoolCacheEntry::is_volatile_shift, 63); // Extract volatile bit. 3145 __ bne(CCR0, LisVolatile); 3146 3147 switch(state) { 3148 case atos: 3149 { 3150 __ load_heap_oop(R17_tos, (RegisterOrConstant)Roffset, Rclass_or_obj); 3151 __ verify_oop(R17_tos); 3152 __ dispatch_epilog(state, Bytecodes::length_for(bytecode()) - 1); // Undo bcp increment. 3153 3154 __ bind(LisVolatile); 3155 if (support_IRIW_for_not_multiple_copy_atomic_cpu) { __ fence(); } 3156 __ load_heap_oop(R17_tos, (RegisterOrConstant)Roffset, Rclass_or_obj); 3157 __ verify_oop(R17_tos); 3158 __ twi_0(R17_tos); 3159 __ isync(); 3160 break; 3161 } 3162 case itos: 3163 { 3164 __ lwax(R17_tos, Rclass_or_obj, Roffset); 3165 __ dispatch_epilog(state, Bytecodes::length_for(bytecode()) - 1); // Undo bcp increment. 3166 3167 __ bind(LisVolatile); 3168 if (support_IRIW_for_not_multiple_copy_atomic_cpu) { __ fence(); } 3169 __ lwax(R17_tos, Rclass_or_obj, Roffset); 3170 __ twi_0(R17_tos); 3171 __ isync(); 3172 break; 3173 } 3174 case ftos: 3175 { 3176 __ lfsx(F15_ftos, Rclass_or_obj, Roffset); 3177 __ dispatch_epilog(state, Bytecodes::length_for(bytecode()) - 1); // Undo bcp increment. 3178 3179 __ bind(LisVolatile); 3180 Label Ldummy; 3181 if (support_IRIW_for_not_multiple_copy_atomic_cpu) { __ fence(); } 3182 __ lfsx(F15_ftos, Rclass_or_obj, Roffset); 3183 __ fcmpu(CCR0, F15_ftos, F15_ftos); // Acquire by cmp-br-isync. 3184 __ bne_predict_not_taken(CCR0, Ldummy); 3185 __ bind(Ldummy); 3186 __ isync(); 3187 break; 3188 } 3189 default: ShouldNotReachHere(); 3190 } 3191 __ addi(R14_bcp, R14_bcp, -1); 3192 } 3193 3194 // ============================================================================ 3195 // Calls 3196 3197 // Common code for invoke 3198 // 3199 // Input: 3200 // - byte_no 3201 // 3202 // Output: 3203 // - Rmethod: The method to invoke next. 3204 // - Rret_addr: The return address to return to. 3205 // - Rindex: MethodType (invokehandle) or CallSite obj (invokedynamic) 3206 // - Rrecv: Cache for "this" pointer, might be noreg if static call. 3207 // - Rflags: Method flags from const pool cache. 3208 // 3209 // Kills: 3210 // - Rscratch1 3211 // 3212 void TemplateTable::prepare_invoke(int byte_no, 3213 Register Rmethod, // linked method (or i-klass) 3214 Register Rret_addr,// return address 3215 Register Rindex, // itable index, MethodType, etc. 3216 Register Rrecv, // If caller wants to see it. 3217 Register Rflags, // If caller wants to test it. 
3218 Register Rscratch 3219 ) { 3220 // Determine flags. 3221 const Bytecodes::Code code = bytecode(); 3222 const bool is_invokeinterface = code == Bytecodes::_invokeinterface; 3223 const bool is_invokedynamic = code == Bytecodes::_invokedynamic; 3224 const bool is_invokehandle = code == Bytecodes::_invokehandle; 3225 const bool is_invokevirtual = code == Bytecodes::_invokevirtual; 3226 const bool is_invokespecial = code == Bytecodes::_invokespecial; 3227 const bool load_receiver = (Rrecv != noreg); 3228 assert(load_receiver == (code != Bytecodes::_invokestatic && code != Bytecodes::_invokedynamic), ""); 3229 3230 assert_different_registers(Rmethod, Rindex, Rflags, Rscratch); 3231 assert_different_registers(Rmethod, Rrecv, Rflags, Rscratch); 3232 assert_different_registers(Rret_addr, Rscratch); 3233 3234 load_invoke_cp_cache_entry(byte_no, Rmethod, Rindex, Rflags, is_invokevirtual, false, is_invokedynamic); 3235 3236 // Saving of SP done in call_from_interpreter. 3237 3238 // Maybe push "appendix" to arguments. 3239 if (is_invokedynamic || is_invokehandle) { 3240 Label Ldone; 3241 __ rldicl_(R0, Rflags, 64-ConstantPoolCacheEntry::has_appendix_shift, 63); 3242 __ beq(CCR0, Ldone); 3243 // Push "appendix" (MethodType, CallSite, etc.). 3244 // This must be done before we get the receiver, 3245 // since the parameter_size includes it. 3246 __ load_resolved_reference_at_index(Rscratch, Rindex); 3247 __ verify_oop(Rscratch); 3248 __ push_ptr(Rscratch); 3249 __ bind(Ldone); 3250 } 3251 3252 // Load receiver if needed (after appendix is pushed so parameter size is correct). 3253 if (load_receiver) { 3254 const Register Rparam_count = Rscratch; 3255 __ andi(Rparam_count, Rflags, ConstantPoolCacheEntry::parameter_size_mask); 3256 __ load_receiver(Rparam_count, Rrecv); 3257 __ verify_oop(Rrecv); 3258 } 3259 3260 // Get return address. 3261 { 3262 Register Rtable_addr = Rscratch; 3263 Register Rret_type = Rret_addr; 3264 address table_addr = (address) Interpreter::invoke_return_entry_table_for(code); 3265 3266 // Get return type. It's coded into the upper 4 bits of the lower half of the 64 bit value. 3267 __ rldicl(Rret_type, Rflags, 64-ConstantPoolCacheEntry::tos_state_shift, 64-ConstantPoolCacheEntry::tos_state_bits); 3268 __ load_dispatch_table(Rtable_addr, (address*)table_addr); 3269 __ sldi(Rret_type, Rret_type, LogBytesPerWord); 3270 // Get return address. 3271 __ ldx(Rret_addr, Rtable_addr, Rret_type); 3272 } 3273 } 3274 3275 // Helper for virtual calls. Load target out of vtable and jump off! 3276 // Kills all passed registers. 3277 void TemplateTable::generate_vtable_call(Register Rrecv_klass, Register Rindex, Register Rret, Register Rtemp) { 3278 3279 assert_different_registers(Rrecv_klass, Rtemp, Rret); 3280 const Register Rtarget_method = Rindex; 3281 3282 // Get target method & entry point. 3283 const int base = InstanceKlass::vtable_start_offset() * wordSize; 3284 // Calc vtable addr scale the vtable index by 8. 3285 __ sldi(Rindex, Rindex, exact_log2(vtableEntry::size() * wordSize)); 3286 // Load target. 3287 __ addi(Rrecv_klass, Rrecv_klass, base + vtableEntry::method_offset_in_bytes()); 3288 __ ldx(Rtarget_method, Rindex, Rrecv_klass); 3289 // Argument and return type profiling. 3290 __ profile_arguments_type(Rtarget_method, Rrecv_klass /* scratch1 */, Rtemp /* scratch2 */, true); 3291 __ call_from_interpreter(Rtarget_method, Rret, Rrecv_klass /* scratch1 */, Rtemp /* scratch2 */); 3292 } 3293 3294 // Virtual or final call. 
// Helper for virtual calls. Load target out of vtable and jump off!
// Kills all passed registers.
void TemplateTable::generate_vtable_call(Register Rrecv_klass, Register Rindex, Register Rret, Register Rtemp) {

  assert_different_registers(Rrecv_klass, Rtemp, Rret);
  const Register Rtarget_method = Rindex;

  // Get target method & entry point.
  const int base = InstanceKlass::vtable_start_offset() * wordSize;
  // Calc vtable addr: scale the vtable index by the entry size (8 bytes on ppc64).
  __ sldi(Rindex, Rindex, exact_log2(vtableEntry::size() * wordSize));
  // Load target.
  __ addi(Rrecv_klass, Rrecv_klass, base + vtableEntry::method_offset_in_bytes());
  __ ldx(Rtarget_method, Rindex, Rrecv_klass);
  // Argument and return type profiling.
  __ profile_arguments_type(Rtarget_method, Rrecv_klass /* scratch1 */, Rtemp /* scratch2 */, true);
  __ call_from_interpreter(Rtarget_method, Rret, Rrecv_klass /* scratch1 */, Rtemp /* scratch2 */);
}
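// The vtable dispatch above computes, in C-like pseudocode (illustration only):
//
//   Method* target =
//       *(Method**)((address)recv_klass
//                   + InstanceKlass::vtable_start_offset() * wordSize
//                   + vtable_index * vtableEntry::size() * wordSize
//                   + vtableEntry::method_offset_in_bytes());
//
// i.e. a single dependent load off the receiver's klass, which is what makes
// invokevirtual cheap compared to the itable search done for invokeinterface.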
// Virtual or final call. Final calls are rewritten on the fly to run through
// "fast_finalcall" next time.
void TemplateTable::invokevirtual(int byte_no) {
  transition(vtos, vtos);

  Register Rtable_addr = R11_scratch1,
           Rret_type = R12_scratch2,
           Rret_addr = R5_ARG3,
           Rflags = R22_tmp2, // Should survive C call.
           Rrecv = R3_ARG1,
           Rrecv_klass = Rrecv,
           Rvtableindex_or_method = R31, // Should survive C call.
           Rnum_params = R4_ARG2,
           Rnew_bc = R6_ARG4;

  Label LnotFinal;

  load_invoke_cp_cache_entry(byte_no, Rvtableindex_or_method, noreg, Rflags, /*virtual*/ true, false, false);

  __ testbitdi(CCR0, R0, Rflags, ConstantPoolCacheEntry::is_vfinal_shift);
  __ bfalse(CCR0, LnotFinal);

  patch_bytecode(Bytecodes::_fast_invokevfinal, Rnew_bc, R12_scratch2);
  invokevfinal_helper(Rvtableindex_or_method, Rflags, R11_scratch1, R12_scratch2);

  __ align(32, 12);
  __ bind(LnotFinal);
  // Load "this" pointer (receiver).
  __ rldicl(Rnum_params, Rflags, 64, 48);
  __ load_receiver(Rnum_params, Rrecv);
  __ verify_oop(Rrecv);

  // Get return type. It's coded into the upper 4 bits of the lower half of the 64 bit value.
  __ rldicl(Rret_type, Rflags, 64-ConstantPoolCacheEntry::tos_state_shift, 64-ConstantPoolCacheEntry::tos_state_bits);
  __ load_dispatch_table(Rtable_addr, Interpreter::invoke_return_entry_table());
  __ sldi(Rret_type, Rret_type, LogBytesPerWord);
  __ ldx(Rret_addr, Rret_type, Rtable_addr);
  __ null_check_throw(Rrecv, oopDesc::klass_offset_in_bytes(), R11_scratch1);
  __ load_klass(Rrecv_klass, Rrecv);
  __ verify_klass_ptr(Rrecv_klass);
  __ profile_virtual_call(Rrecv_klass, R11_scratch1, R12_scratch2, false);

  generate_vtable_call(Rrecv_klass, Rvtableindex_or_method, Rret_addr, R11_scratch1);
}

void TemplateTable::fast_invokevfinal(int byte_no) {
  transition(vtos, vtos);

  assert(byte_no == f2_byte, "use this argument");
  Register Rflags = R22_tmp2,
           Rmethod = R31;
  load_invoke_cp_cache_entry(byte_no, Rmethod, noreg, Rflags, /*virtual*/ true, /*is_invokevfinal*/ true, false);
  invokevfinal_helper(Rmethod, Rflags, R11_scratch1, R12_scratch2);
}

void TemplateTable::invokevfinal_helper(Register Rmethod, Register Rflags, Register Rscratch1, Register Rscratch2) {

  assert_different_registers(Rmethod, Rflags, Rscratch1, Rscratch2);

  // Load receiver from stack slot.
  Register Rrecv = Rscratch2;
  Register Rnum_params = Rrecv;

  __ ld(Rnum_params, in_bytes(Method::const_offset()), Rmethod);
  __ lhz(Rnum_params /* number of params */, in_bytes(ConstMethod::size_of_parameters_offset()), Rnum_params);

  // Get return address.
  Register Rtable_addr = Rscratch1,
           Rret_addr = Rflags,
           Rret_type = Rret_addr;
  // Get return type. It's coded into the upper 4 bits of the lower half of the 64 bit value.
  __ rldicl(Rret_type, Rflags, 64-ConstantPoolCacheEntry::tos_state_shift, 64-ConstantPoolCacheEntry::tos_state_bits);
  __ load_dispatch_table(Rtable_addr, Interpreter::invoke_return_entry_table());
  __ sldi(Rret_type, Rret_type, LogBytesPerWord);
  __ ldx(Rret_addr, Rret_type, Rtable_addr);

  // Load receiver and receiver NULL check.
  __ load_receiver(Rnum_params, Rrecv);
  __ null_check_throw(Rrecv, -1, Rscratch1);

  __ profile_final_call(Rrecv, Rscratch1);
  // Argument and return type profiling.
  __ profile_arguments_type(Rmethod, Rscratch1, Rscratch2, true);

  // Do the call.
  __ call_from_interpreter(Rmethod, Rret_addr, Rscratch1, Rscratch2);
}

void TemplateTable::invokespecial(int byte_no) {
  assert(byte_no == f1_byte, "use this argument");
  transition(vtos, vtos);

  Register Rtable_addr = R3_ARG1,
           Rret_addr = R4_ARG2,
           Rflags = R5_ARG3,
           Rreceiver = R6_ARG4,
           Rmethod = R31;

  prepare_invoke(byte_no, Rmethod, Rret_addr, noreg, Rreceiver, Rflags, R11_scratch1);

  // Receiver NULL check.
  __ null_check_throw(Rreceiver, -1, R11_scratch1);

  __ profile_call(R11_scratch1, R12_scratch2);
  // Argument and return type profiling.
  __ profile_arguments_type(Rmethod, R11_scratch1, R12_scratch2, false);
  __ call_from_interpreter(Rmethod, Rret_addr, R11_scratch1, R12_scratch2);
}

void TemplateTable::invokestatic(int byte_no) {
  assert(byte_no == f1_byte, "use this argument");
  transition(vtos, vtos);

  Register Rtable_addr = R3_ARG1,
           Rret_addr = R4_ARG2,
           Rflags = R5_ARG3;

  prepare_invoke(byte_no, R19_method, Rret_addr, noreg, noreg, Rflags, R11_scratch1);

  __ profile_call(R11_scratch1, R12_scratch2);
  // Argument and return type profiling.
  __ profile_arguments_type(R19_method, R11_scratch1, R12_scratch2, false);
  __ call_from_interpreter(R19_method, Rret_addr, R11_scratch1, R12_scratch2);
}

void TemplateTable::invokeinterface_object_method(Register Rrecv_klass,
                                                  Register Rret,
                                                  Register Rflags,
                                                  Register Rindex,
                                                  Register Rtemp1,
                                                  Register Rtemp2) {

  assert_different_registers(Rindex, Rret, Rrecv_klass, Rflags, Rtemp1, Rtemp2);
  Label LnotFinal;

  // Check for vfinal.
  __ testbitdi(CCR0, R0, Rflags, ConstantPoolCacheEntry::is_vfinal_shift);
  __ bfalse(CCR0, LnotFinal);

  Register Rscratch = Rflags; // Rflags is dead now.

  // Final call case.
  __ profile_final_call(Rtemp1, Rscratch);
  // Argument and return type profiling.
  __ profile_arguments_type(Rindex, Rscratch, Rrecv_klass /* scratch */, true);
  // Do the final call - the index (f2) contains the method.
  __ call_from_interpreter(Rindex, Rret, Rscratch, Rrecv_klass /* scratch */);

  // Non-final call case.
  __ bind(LnotFinal);
  __ profile_virtual_call(Rrecv_klass, Rtemp1, Rscratch, false);
  generate_vtable_call(Rrecv_klass, Rindex, Rret, Rscratch);
}
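// For reference (illustration only): invokeinterface may legally name a
// method of java.lang.Object. A compliant compiler other than javac could,
// for example, compile
//
//   interface I { }
//   int h(I i) { return i.hashCode(); }
//
// to an invokeinterface that resolves to the virtual method
// java.lang.Object::hashCode; the is_forced_virtual flag tested below routes
// such call sites to invokeinterface_object_method.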
void TemplateTable::invokeinterface(int byte_no) {
  assert(byte_no == f1_byte, "use this argument");
  transition(vtos, vtos);

  const Register Rscratch1 = R11_scratch1,
                 Rscratch2 = R12_scratch2,
                 Rscratch3 = R9_ARG7,
                 Rscratch4 = R10_ARG8,
                 Rtable_addr = Rscratch2,
                 Rinterface_klass = R5_ARG3,
                 Rret_type = R8_ARG6,
                 Rret_addr = Rret_type,
                 Rindex = R6_ARG4,
                 Rreceiver = R4_ARG2,
                 Rrecv_klass = Rreceiver,
                 Rflags = R7_ARG5;

  prepare_invoke(byte_no, Rinterface_klass, Rret_addr, Rindex, Rreceiver, Rflags, Rscratch1);

  // Get receiver klass.
  __ null_check_throw(Rreceiver, oopDesc::klass_offset_in_bytes(), Rscratch3);
  __ load_klass(Rrecv_klass, Rreceiver);

  // Check corner case: invokeinterface on a method of java.lang.Object.
  Label LobjectMethod;

  __ testbitdi(CCR0, R0, Rflags, ConstantPoolCacheEntry::is_forced_virtual_shift);
  __ btrue(CCR0, LobjectMethod);

  // Fallthrough: The normal invokeinterface case.
  __ profile_virtual_call(Rrecv_klass, Rscratch1, Rscratch2, false);

  // Find entry point to call.
  Label Lthrow_icc, Lthrow_ame;
  // Result will be returned in Rindex.
  __ mr(Rscratch4, Rrecv_klass);
  __ mr(Rscratch3, Rindex);
  __ lookup_interface_method(Rrecv_klass, Rinterface_klass, Rindex, Rindex, Rscratch1, Rscratch2, Lthrow_icc);

  __ cmpdi(CCR0, Rindex, 0);
  __ beq(CCR0, Lthrow_ame);
  // Found entry. Jump off!
  // Argument and return type profiling.
  __ profile_arguments_type(Rindex, Rscratch1, Rscratch2, true);
  __ call_from_interpreter(Rindex, Rret_addr, Rscratch1, Rscratch2);

  // Vtable entry was NULL => Throw abstract method error.
  __ bind(Lthrow_ame);
  __ mr(Rrecv_klass, Rscratch4);
  __ mr(Rindex, Rscratch3);
  call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_AbstractMethodError));

  // Interface was not found => Throw incompatible class change error.
  __ bind(Lthrow_icc);
  __ mr(Rrecv_klass, Rscratch4);
  call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_IncompatibleClassChangeError));

  __ should_not_reach_here();

  // Special case of invokeinterface called for a virtual method of
  // java.lang.Object. See ConstantPoolCacheEntry::set_method() for details:
  // The invokeinterface was rewritten to an invokevirtual, hence we have
  // to handle this corner case. This code isn't produced by javac, but could
  // be produced by another compliant java compiler.
  __ bind(LobjectMethod);
  invokeinterface_object_method(Rrecv_klass, Rret_addr, Rflags, Rindex, Rscratch1, Rscratch2);
}

void TemplateTable::invokedynamic(int byte_no) {
  transition(vtos, vtos);

  const Register Rret_addr = R3_ARG1,
                 Rflags = R4_ARG2,
                 Rmethod = R22_tmp2,
                 Rscratch1 = R11_scratch1,
                 Rscratch2 = R12_scratch2;

  if (!EnableInvokeDynamic) {
    // We should not encounter this bytecode if !EnableInvokeDynamic.
    // The verifier will stop it. However, if we get past the verifier,
    // this will stop the thread in a reasonable way, without crashing the JVM.
    __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_IncompatibleClassChangeError));
    // The call_VM checks for exception, so we should never return here.
    __ should_not_reach_here();
    return;
  }

  prepare_invoke(byte_no, Rmethod, Rret_addr, Rscratch1, noreg, Rflags, Rscratch2);

  // Profile this call.
  __ profile_call(Rscratch1, Rscratch2);

  // Off we go. With the new method handles, we don't jump to a method handle
  // entry any more. Instead, we pushed an "appendix" in prepare_invoke, which happens
  // to be the callsite object the bootstrap method returned. This is passed to a
  // "link" method which does the dispatch (most likely just grabs the MH stored
  // inside the callsite and does an invokehandle).
  // Argument and return type profiling.
  __ profile_arguments_type(Rmethod, Rscratch1, Rscratch2, false);
  __ call_from_interpreter(Rmethod, Rret_addr, Rscratch1 /* scratch1 */, Rscratch2 /* scratch2 */);
}
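// For reference (illustration only): at the Java level, an invokedynamic
// call site behaves roughly as if linked like
//
//   // once, at first execution:
//   CallSite cs = (CallSite) bootstrapMethod.invoke(lookup, name, type);
//   // on every execution:
//   cs.dynamicInvoker().invokeExact(args...);
//
// The appendix pushed in prepare_invoke is that linkage object; the "link"
// method consumes it as a trailing argument.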
void TemplateTable::invokehandle(int byte_no) {
  transition(vtos, vtos);

  const Register Rret_addr = R3_ARG1,
                 Rflags = R4_ARG2,
                 Rrecv = R5_ARG3,
                 Rmethod = R22_tmp2,
                 Rscratch1 = R11_scratch1,
                 Rscratch2 = R12_scratch2;

  if (!EnableInvokeDynamic) {
    // Rewriter does not generate this bytecode.
    __ should_not_reach_here();
    return;
  }

  prepare_invoke(byte_no, Rmethod, Rret_addr, Rscratch1, Rrecv, Rflags, Rscratch2);
  __ verify_method_ptr(Rmethod);
  __ null_check_throw(Rrecv, -1, Rscratch2);

  __ profile_final_call(Rrecv, Rscratch1);

  // Still no call from handle => We call the method handle interpreter here.
  // Argument and return type profiling.
  __ profile_arguments_type(Rmethod, Rscratch1, Rscratch2, true);
  __ call_from_interpreter(Rmethod, Rret_addr, Rscratch1 /* scratch1 */, Rscratch2 /* scratch2 */);
}

// =============================================================================
// Allocation

// Puts allocated obj ref onto the expression stack.
void TemplateTable::_new() {
  transition(vtos, atos);

  Label Lslow_case,
        Ldone,
        Linitialize_header,
        Lallocate_shared,
        Linitialize_object; // Including clearing the fields.

  const Register RallocatedObject = R17_tos,
                 RinstanceKlass = R9_ARG7,
                 Rscratch = R11_scratch1,
                 Roffset = R8_ARG6,
                 Rinstance_size = Roffset,
                 Rcpool = R4_ARG2,
                 Rtags = R3_ARG1,
                 Rindex = R5_ARG3;

  const bool allow_shared_alloc = Universe::heap()->supports_inline_contig_alloc() && !CMSIncrementalMode;

  // --------------------------------------------------------------------------
  // Check if fast case is possible.

  // Load pointers to const pool and const pool's tags array.
  __ get_cpool_and_tags(Rcpool, Rtags);
  // Load index of constant pool entry.
  __ get_2_byte_integer_at_bcp(1, Rindex, InterpreterMacroAssembler::Unsigned);

  if (UseTLAB) {
    // Make sure the class we're about to instantiate has been resolved.
    // This is done before loading InstanceKlass to be consistent with the order
    // in which the constant pool is updated (see ConstantPool::klass_at_put).
    __ addi(Rtags, Rtags, Array<u1>::base_offset_in_bytes());
    __ lbzx(Rtags, Rindex, Rtags);

    __ cmpdi(CCR0, Rtags, JVM_CONSTANT_Class);
    __ bne(CCR0, Lslow_case);

    // Get instanceKlass (load from Rcpool + sizeof(ConstantPool) + Rindex*BytesPerWord).
    __ sldi(Roffset, Rindex, LogBytesPerWord);
    __ addi(Rscratch, Rcpool, sizeof(ConstantPool));
    __ isync(); // Order load of instance Klass wrt. tags.
    __ ldx(RinstanceKlass, Roffset, Rscratch);

    // Make sure klass is fully initialized and get instance_size.
    __ lbz(Rscratch, in_bytes(InstanceKlass::init_state_offset()), RinstanceKlass);
    __ lwz(Rinstance_size, in_bytes(Klass::layout_helper_offset()), RinstanceKlass);

    __ cmpdi(CCR1, Rscratch, InstanceKlass::fully_initialized);
    // Make sure klass has no finalizer and is not abstract, an interface, or java/lang/Class.
    __ andi_(R0, Rinstance_size, Klass::_lh_instance_slow_path_bit); // slow path bit equals 0?

    __ crnand(/*CR0 eq*/2, /*CR1 eq*/4+2, /*CR0 eq*/2); // slow path bit set or not fully initialized?
    __ beq(CCR0, Lslow_case);

    // --------------------------------------------------------------------------
    // Fast case:
    // Allocate the instance.
    // 1) Try to allocate in the TLAB.
    // 2) If that fails, and the TLAB is not full enough to discard, allocate in the shared Eden.
    // 3) If the above fails (or is not applicable), go to the slow case (creates a new TLAB, etc.).

    Register RoldTopValue = RallocatedObject; // Object will be allocated here if it fits.
    Register RnewTopValue = R6_ARG4;
    Register RendValue = R7_ARG5;

    // Check if we can allocate in the TLAB.
    __ ld(RoldTopValue, in_bytes(JavaThread::tlab_top_offset()), R16_thread);
    __ ld(RendValue, in_bytes(JavaThread::tlab_end_offset()), R16_thread);

    __ add(RnewTopValue, Rinstance_size, RoldTopValue);

    // If there is enough space, we do not CAS and do not clear.
    __ cmpld(CCR0, RnewTopValue, RendValue);
    __ bgt(CCR0, allow_shared_alloc ? Lallocate_shared : Lslow_case);

    __ std(RnewTopValue, in_bytes(JavaThread::tlab_top_offset()), R16_thread);

    if (ZeroTLAB) {
      // The fields have already been cleared.
      __ b(Linitialize_header);
    } else {
      // Initialize both the header and fields.
      __ b(Linitialize_object);
    }

    // Fall through: TLAB was too small.
    if (allow_shared_alloc) {
      Register RtlabWasteLimitValue = R10_ARG8;
      Register RfreeValue = RnewTopValue;

      __ bind(Lallocate_shared);
      // Check if tlab should be discarded (refill_waste_limit >= free).
      __ ld(RtlabWasteLimitValue, in_bytes(JavaThread::tlab_refill_waste_limit_offset()), R16_thread);
      __ subf(RfreeValue, RoldTopValue, RendValue);
      __ srdi(RfreeValue, RfreeValue, LogHeapWordSize); // in dwords
      __ cmpld(CCR0, RtlabWasteLimitValue, RfreeValue);
      __ bge(CCR0, Lslow_case);

      // Increment waste limit to prevent getting stuck on this slow path.
      __ addi(RtlabWasteLimitValue, RtlabWasteLimitValue, (int)ThreadLocalAllocBuffer::refill_waste_limit_increment());
      __ std(RtlabWasteLimitValue, in_bytes(JavaThread::tlab_refill_waste_limit_offset()), R16_thread);
    }
    // else: No allocation in the shared eden. // fallthru: __ b(Lslow_case);
  }
  // else: Always go the slow path.

  // --------------------------------------------------------------------------
  // slow case
  __ bind(Lslow_case);
  call_VM(R17_tos, CAST_FROM_FN_PTR(address, InterpreterRuntime::_new), Rcpool, Rindex);

  if (UseTLAB) {
    __ b(Ldone);
    // --------------------------------------------------------------------------
    // Init1: Zero out newly allocated memory.

    if (!ZeroTLAB || allow_shared_alloc) {
      // Clear object fields.
      __ bind(Linitialize_object);

      // Initialize remaining object fields.
      Register Rbase = Rtags;
      __ addi(Rinstance_size, Rinstance_size, 7 - (int)sizeof(oopDesc));
      __ addi(Rbase, RallocatedObject, sizeof(oopDesc));
      __ srdi(Rinstance_size, Rinstance_size, 3);

      // Clear out object skipping header. Also takes care of the zero length case.
      __ clear_memory_doubleword(Rbase, Rinstance_size);
      // fallthru: __ b(Linitialize_header);
    }

    // --------------------------------------------------------------------------
    // Init2: Initialize the header: mark, klass
    __ bind(Linitialize_header);

    // Init mark.
    if (UseBiasedLocking) {
      __ ld(Rscratch, in_bytes(Klass::prototype_header_offset()), RinstanceKlass);
    } else {
      __ load_const_optimized(Rscratch, markOopDesc::prototype(), R0);
    }
    __ std(Rscratch, oopDesc::mark_offset_in_bytes(), RallocatedObject);

    // Init klass.
    __ store_klass_gap(RallocatedObject);
    __ store_klass(RallocatedObject, RinstanceKlass, Rscratch); // klass (last for cms)

    // Check and trigger dtrace event.
    {
      SkipIfEqualZero skip_if(_masm, Rscratch, &DTraceAllocProbes);
      __ push(atos);
      __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_object_alloc));
      __ pop(atos);
    }
  }

  // continue
  __ bind(Ldone);

  // Must prevent reordering of stores for object initialization with stores that publish the new object.
  __ membar(Assembler::StoreStore);
}
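// The TLAB fast path in _new above is, in C-like pseudocode (illustration
// only; accessor names are hypothetical):
//
//   HeapWord* old_top = thread->tlab_top();
//   HeapWord* new_top = old_top + instance_size;
//   if (new_top <= thread->tlab_end()) {
//     thread->set_tlab_top(new_top);    // Thread-local bump pointer: no CAS needed.
//     obj = old_top;                    // Then clear fields and initialize the header.
//   } else if (refill_waste_limit >= tlab_free) {
//     goto slow_case;                   // TLAB nearly exhausted: let the runtime refill it.
//   } else {
//     refill_waste_limit += increment;  // Keep the TLAB; avoid looping on this path.
//     goto slow_case;                   // Runtime allocates the object elsewhere.
//   }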
void TemplateTable::newarray() {
  transition(itos, atos);

  __ lbz(R4, 1, R14_bcp);
  __ extsw(R5, R17_tos);
  call_VM(R17_tos, CAST_FROM_FN_PTR(address, InterpreterRuntime::newarray), R4 /* type */, R5 /* size */);

  // Must prevent reordering of stores for object initialization with stores that publish the new object.
  __ membar(Assembler::StoreStore);
}

void TemplateTable::anewarray() {
  transition(itos, atos);

  __ get_constant_pool(R4);
  __ get_2_byte_integer_at_bcp(1, R5, InterpreterMacroAssembler::Unsigned);
  __ extsw(R6, R17_tos); // size
  call_VM(R17_tos, CAST_FROM_FN_PTR(address, InterpreterRuntime::anewarray), R4 /* pool */, R5 /* index */, R6 /* size */);

  // Must prevent reordering of stores for object initialization with stores that publish the new object.
  __ membar(Assembler::StoreStore);
}

// Allocate a multi dimensional array.
void TemplateTable::multianewarray() {
  transition(vtos, atos);

  Register Rptr = R31; // Needs to survive C call.

  // Compute ndims * wordSize in Rptr: the size the dimension words occupy on the expression stack.
  __ lbz(Rptr, 3, R14_bcp);
  __ sldi(Rptr, Rptr, Interpreter::logStackElementSize);
  // Esp points past last_dim, so set R4 to the first_dim address.
  __ add(R4, Rptr, R15_esp);
  call_VM(R17_tos, CAST_FROM_FN_PTR(address, InterpreterRuntime::multianewarray), R4 /* first_size_address */);
  // Pop all dimensions off the stack.
  __ add(R15_esp, Rptr, R15_esp);

  // Must prevent reordering of stores for object initialization with stores that publish the new object.
  __ membar(Assembler::StoreStore);
}
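// For reference (illustration only): for "new int[2][3][4]" javac emits
//
//   iconst_2 ; iconst_3 ; iconst_4
//   multianewarray #cp_index, 3
//
// so the three dimension sizes sit on the expression stack with the last
// dimension on top; R4 above points at the first dimension and the runtime
// reads all ndims words before they are popped.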
void TemplateTable::arraylength() {
  transition(atos, itos);

  __ verify_oop(R17_tos);
  __ null_check_throw(R17_tos, arrayOopDesc::length_offset_in_bytes(), R11_scratch1);
  __ lwa(R17_tos, arrayOopDesc::length_offset_in_bytes(), R17_tos);
}

// ============================================================================
// Typechecks

void TemplateTable::checkcast() {
  transition(atos, atos);

  Label Ldone, Lis_null, Lquicked, Lresolved;
  Register Roffset = R6_ARG4,
           RobjKlass = R4_ARG2,
           RspecifiedKlass = R5_ARG3, // Generate_ClassCastException_verbose_handler will read value from this register.
           Rcpool = R11_scratch1,
           Rtags = R12_scratch2;

  // Null does not pass.
  __ cmpdi(CCR0, R17_tos, 0);
  __ beq(CCR0, Lis_null);

  // Get constant pool tag to find out if the bytecode has already been "quickened".
  __ get_cpool_and_tags(Rcpool, Rtags);

  __ get_2_byte_integer_at_bcp(1, Roffset, InterpreterMacroAssembler::Unsigned);

  __ addi(Rtags, Rtags, Array<u1>::base_offset_in_bytes());
  __ lbzx(Rtags, Rtags, Roffset);

  __ cmpdi(CCR0, Rtags, JVM_CONSTANT_Class);
  __ beq(CCR0, Lquicked);

  // Call into the VM to "quicken" the bytecode (quicken_io_cc handles both instanceof and checkcast).
  __ push_ptr();  // for GC
  call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::quicken_io_cc));
  __ get_vm_result_2(RspecifiedKlass);
  __ pop_ptr();   // Restore receiver.
  __ b(Lresolved);

  // Extract target class from constant pool.
  __ bind(Lquicked);
  __ sldi(Roffset, Roffset, LogBytesPerWord);
  __ addi(Rcpool, Rcpool, sizeof(ConstantPool));
  __ isync(); // Order load of specified Klass wrt. tags.
  __ ldx(RspecifiedKlass, Rcpool, Roffset);

  // Do the checkcast.
  __ bind(Lresolved);
  // Get value klass in RobjKlass.
  __ load_klass(RobjKlass, R17_tos);
  // Generate a fast subtype check. Branches to Ldone if RobjKlass is a subtype; falls through on failure.
  __ gen_subtype_check(RobjKlass, RspecifiedKlass, /*3 temp regs*/ Roffset, Rcpool, Rtags, /*target if subtype*/ Ldone);

  // Not a subtype, so throw a ClassCastException.
  // Target class oop is in register R6_ARG4 == RspecifiedKlass by convention.
  __ load_dispatch_table(R11_scratch1, (address*)Interpreter::_throw_ClassCastException_entry);
  __ mtctr(R11_scratch1);
  __ bctr();

  // Profile the null case.
  __ align(32, 12);
  __ bind(Lis_null);
  __ profile_null_seen(R11_scratch1, Rtags); // Rtags used as scratch.

  __ align(32, 12);
  __ bind(Ldone);
}
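// For reference (illustration only):
//
//   Object o = ...;
//   String s = (String) o;   // checkcast java/lang/String
//
// leaves a null o unchanged (Lis_null above), passes any o whose klass is a
// subtype of String, and otherwise enters the verbose ClassCastException
// handler with the expected class in RspecifiedKlass.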
// Output:
// - tos == 0: Obj was null or not an instance of class.
// - tos == 1: Obj was an instance of class.
void TemplateTable::instanceof() {
  transition(atos, itos);

  Label Ldone, Lis_null, Lquicked, Lresolved;
  Register Roffset = R5_ARG3,
           RobjKlass = R4_ARG2,
           RspecifiedKlass = R6_ARG4, // Generate_ClassCastException_verbose_handler will expect the value in this register.
           Rcpool = R11_scratch1,
           Rtags = R12_scratch2;

  // Null does not pass.
  __ cmpdi(CCR0, R17_tos, 0);
  __ beq(CCR0, Lis_null);

  // Get constant pool tag to find out if the bytecode has already been "quickened".
  __ get_cpool_and_tags(Rcpool, Rtags);

  __ get_2_byte_integer_at_bcp(1, Roffset, InterpreterMacroAssembler::Unsigned);

  __ addi(Rtags, Rtags, Array<u1>::base_offset_in_bytes());
  __ lbzx(Rtags, Rtags, Roffset);

  __ cmpdi(CCR0, Rtags, JVM_CONSTANT_Class);
  __ beq(CCR0, Lquicked);

  // Call into the VM to "quicken" instanceof.
  __ push_ptr();  // for GC
  call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::quicken_io_cc));
  __ get_vm_result_2(RspecifiedKlass);
  __ pop_ptr();   // Restore receiver.
  __ b(Lresolved);

  // Extract target class from constant pool.
  __ bind(Lquicked);
  __ sldi(Roffset, Roffset, LogBytesPerWord);
  __ addi(Rcpool, Rcpool, sizeof(ConstantPool));
  __ isync(); // Order load of specified Klass wrt. tags.
  __ ldx(RspecifiedKlass, Rcpool, Roffset);

  // Do the type check.
  __ bind(Lresolved);
  // Get value klass in RobjKlass.
  __ load_klass(RobjKlass, R17_tos);
  // Generate a fast subtype check. Branches to Ldone with tos == 1 if RobjKlass is a subtype; falls through otherwise.
  __ li(R17_tos, 1);
  __ gen_subtype_check(RobjKlass, RspecifiedKlass, /*3 temp regs*/ Roffset, Rcpool, Rtags, /*target if subtype*/ Ldone);
  __ li(R17_tos, 0);

  if (ProfileInterpreter) {
    __ b(Ldone);
  }

  // Profile the null case.
  __ align(32, 12);
  __ bind(Lis_null);
  __ profile_null_seen(Rcpool, Rtags); // Rcpool and Rtags used as scratch.

  __ align(32, 12);
  __ bind(Ldone);
}
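// For reference (illustration only):
//
//   boolean b = (o instanceof String);   // pushes 1 or 0
//
// A null reference yields 0 via the Lis_null path, which also records
// null_seen profiling for the compilers.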
// =============================================================================
// Breakpoints

void TemplateTable::_breakpoint() {
  transition(vtos, vtos);

  // Get the unpatched byte code.
  __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::get_original_bytecode_at), R19_method, R14_bcp);
  __ mr(R31, R3_RET);

  // Post the breakpoint event.
  __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::_breakpoint), R19_method, R14_bcp);

  // Complete the execution of original bytecode.
  __ dispatch_Lbyte_code(vtos, R31, Interpreter::normal_table(vtos));
}

// =============================================================================
// Exceptions

void TemplateTable::athrow() {
  transition(atos, vtos);

  // Exception oop is in tos.
  __ verify_oop(R17_tos);

  __ null_check_throw(R17_tos, -1, R11_scratch1);

  // The throw-exception interpreter entry expects the exception oop to be in R3.
  __ mr(R3_RET, R17_tos);
  __ load_dispatch_table(R11_scratch1, (address*)Interpreter::throw_exception_entry());
  __ mtctr(R11_scratch1);
  __ bctr();
}

// =============================================================================
// Synchronization
// Searches the basic object lock list on the stack for a free slot
// and uses it to lock the object in tos.
//
// Recursive locking is enabled by exiting the search if the same
// object is already found in the list. Thus, a new BasicObjectLock
// is allocated "higher up" in the stack and is found first
// at the next monitor exit.
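// For reference (illustration only): for
//
//   synchronized (a) { synchronized (a) { ... } }
//
// the inner monitorenter finds 'a' already in the list, stops the search,
// and allocates a fresh BasicObjectLock above it; the matching inner
// monitorexit then finds that newer slot first, so the nesting unwinds in
// order.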
void TemplateTable::monitorenter() {
  transition(atos, vtos);

  __ verify_oop(R17_tos);

  Register Rcurrent_monitor = R11_scratch1,
           Rcurrent_obj = R12_scratch2,
           Robj_to_lock = R17_tos,
           Rscratch1 = R3_ARG1,
           Rscratch2 = R4_ARG2,
           Rscratch3 = R5_ARG3,
           Rcurrent_obj_addr = R6_ARG4;

  // ------------------------------------------------------------------------------
  // Null pointer exception.
  __ null_check_throw(Robj_to_lock, -1, R11_scratch1);

  // Try to acquire a lock on the object.
  // Repeat until succeeded (i.e., until monitorenter returns true).

  // ------------------------------------------------------------------------------
  // Find a free slot in the monitor block.
  Label Lfound, Lexit, Lallocate_new;
  ConditionRegister found_free_slot = CCR0,
                    found_same_obj = CCR1,
                    reached_limit = CCR6;
  {
    Label Lloop, Lentry;
    Register Rlimit = Rcurrent_monitor;

    // Set up search loop - start with topmost monitor.
    __ add(Rcurrent_obj_addr, BasicObjectLock::obj_offset_in_bytes(), R26_monitor);

    __ ld(Rlimit, 0, R1_SP);
    __ addi(Rlimit, Rlimit, - (frame::ijava_state_size + frame::interpreter_frame_monitor_size_in_bytes() - BasicObjectLock::obj_offset_in_bytes())); // Monitor base

    // Check if any slot is present => short cut to allocation if not.
    __ cmpld(reached_limit, Rcurrent_obj_addr, Rlimit);
    __ bgt(reached_limit, Lallocate_new);

    // Pre-load topmost slot.
    __ ld(Rcurrent_obj, 0, Rcurrent_obj_addr);
    __ addi(Rcurrent_obj_addr, Rcurrent_obj_addr, frame::interpreter_frame_monitor_size() * wordSize);
    // The search loop.
    __ bind(Lloop);
    // Found free slot?
    __ cmpdi(found_free_slot, Rcurrent_obj, 0);
    // Is this entry for same obj? If so, stop the search and take the found
    // free slot or allocate a new one to enable recursive locking.
    __ cmpd(found_same_obj, Rcurrent_obj, Robj_to_lock);
    __ cmpld(reached_limit, Rcurrent_obj_addr, Rlimit);
    __ beq(found_free_slot, Lexit);
    __ beq(found_same_obj, Lallocate_new);
    __ bgt(reached_limit, Lallocate_new);
    // Check if last allocated BasicObjectLock reached.
    __ ld(Rcurrent_obj, 0, Rcurrent_obj_addr);
    __ addi(Rcurrent_obj_addr, Rcurrent_obj_addr, frame::interpreter_frame_monitor_size() * wordSize);
    // Next iteration if unchecked BasicObjectLocks exist on the stack.
    __ b(Lloop);
  }

  // ------------------------------------------------------------------------------
  // Check if we found a free slot.
  __ bind(Lexit);

  __ addi(Rcurrent_monitor, Rcurrent_obj_addr, -(frame::interpreter_frame_monitor_size() * wordSize) - BasicObjectLock::obj_offset_in_bytes());
  __ addi(Rcurrent_obj_addr, Rcurrent_obj_addr, - frame::interpreter_frame_monitor_size() * wordSize);
  __ b(Lfound);

  // We didn't find a free BasicObjectLock => allocate one.
  __ align(32, 12);
  __ bind(Lallocate_new);
  __ add_monitor_to_stack(false, Rscratch1, Rscratch2);
  __ mr(Rcurrent_monitor, R26_monitor);
  __ addi(Rcurrent_obj_addr, R26_monitor, BasicObjectLock::obj_offset_in_bytes());

  // ------------------------------------------------------------------------------
  // We now have a slot to lock.
  __ bind(Lfound);

  // Increment bcp to point to the next bytecode, so exception handling for async. exceptions works correctly.
  // The object has already been popped from the stack, so the expression stack looks correct.
  __ addi(R14_bcp, R14_bcp, 1);

  __ std(Robj_to_lock, 0, Rcurrent_obj_addr);
  __ lock_object(Rcurrent_monitor, Robj_to_lock);

  // Check if there's enough space on the stack for the monitors after locking.
  Label Lskip_stack_check;
  // Optimization: If the monitors stack section is less than a standard page size (4K), don't run
  // the stack check. There should be enough shadow pages to fit that in.
  __ ld(Rscratch3, 0, R1_SP);
  __ sub(Rscratch3, Rscratch3, R26_monitor);
  __ cmpdi(CCR0, Rscratch3, 4*K);
  __ blt(CCR0, Lskip_stack_check);

  DEBUG_ONLY(__ untested("stack overflow check during monitor enter");)
  __ li(Rscratch1, 0);
  __ generate_stack_overflow_check_with_compare_and_throw(Rscratch1, Rscratch2);

  __ align(32, 12);
  __ bind(Lskip_stack_check);

  // The bcp has already been incremented. Just need to dispatch to next instruction.
  __ dispatch_next(vtos);
}
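// For reference (illustration only): monitorexit below relies on structured
// locking. Hand-crafted bytecode such as
//
//   aload_1
//   monitorexit        // no matching monitorenter for this object
//
// finds no slot in the monitor area and ends up in
// throw_illegal_monitor_state_exception, as does an unbalanced enter/exit
// pair caught by the limit check.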
void TemplateTable::monitorexit() {
  transition(atos, vtos);
  __ verify_oop(R17_tos);

  Register Rcurrent_monitor = R11_scratch1,
           Rcurrent_obj = R12_scratch2,
           Robj_to_lock = R17_tos,
           Rcurrent_obj_addr = R3_ARG1,
           Rlimit = R4_ARG2;
  Label Lfound, Lillegal_monitor_state;

  // Check corner case: unbalanced monitorEnter / Exit.
  __ ld(Rlimit, 0, R1_SP);
  __ addi(Rlimit, Rlimit, - (frame::ijava_state_size + frame::interpreter_frame_monitor_size_in_bytes())); // Monitor base

  // Null pointer check.
  __ null_check_throw(Robj_to_lock, -1, R11_scratch1);

  __ cmpld(CCR0, R26_monitor, Rlimit);
  __ bgt(CCR0, Lillegal_monitor_state);

  // Find the corresponding slot in the monitors stack section.
  {
    Label Lloop;

    // Start with topmost monitor.
    __ addi(Rcurrent_obj_addr, R26_monitor, BasicObjectLock::obj_offset_in_bytes());
    __ addi(Rlimit, Rlimit, BasicObjectLock::obj_offset_in_bytes());
    __ ld(Rcurrent_obj, 0, Rcurrent_obj_addr);
    __ addi(Rcurrent_obj_addr, Rcurrent_obj_addr, frame::interpreter_frame_monitor_size() * wordSize);

    __ bind(Lloop);
    // Is this entry for same obj?
    __ cmpd(CCR0, Rcurrent_obj, Robj_to_lock);
    __ beq(CCR0, Lfound);

    // Check if last allocated BasicObjectLock reached.
    __ ld(Rcurrent_obj, 0, Rcurrent_obj_addr);
    __ cmpld(CCR0, Rcurrent_obj_addr, Rlimit);
    __ addi(Rcurrent_obj_addr, Rcurrent_obj_addr, frame::interpreter_frame_monitor_size() * wordSize);

    // Next iteration if unchecked BasicObjectLocks exist on the stack.
    __ ble(CCR0, Lloop);
  }

  // Fell through without finding the basic object lock => throw an IllegalMonitorStateException.
  __ bind(Lillegal_monitor_state);
  call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_illegal_monitor_state_exception));
  __ should_not_reach_here();

  __ align(32, 12);
  __ bind(Lfound);
  __ addi(Rcurrent_monitor, Rcurrent_obj_addr,
          -(frame::interpreter_frame_monitor_size() * wordSize) - BasicObjectLock::obj_offset_in_bytes());
  __ unlock_object(Rcurrent_monitor);
}
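// For reference (illustration only), the monitor area searched above lives
// between the frame's ijava_state and the expression stack and grows towards
// lower addresses, roughly:
//
//   caller SP
//     ijava_state
//     BasicObjectLock (oldest)   <- monitor base (Rlimit)
//     ...
//     BasicObjectLock (newest)   <- R26_monitor
//     expression stack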
// ============================================================================
// Wide bytecodes

// Wide instructions. Simply redirects to the wide entry point for that instruction.
void TemplateTable::wide() {
  transition(vtos, vtos);

  const Register Rtable = R11_scratch1,
                 Rindex = R12_scratch2,
                 Rtmp = R0;

  __ lbz(Rindex, 1, R14_bcp);

  __ load_dispatch_table(Rtable, Interpreter::_wentry_point);

  __ slwi(Rindex, Rindex, LogBytesPerWord);
  __ ldx(Rtmp, Rtable, Rindex);
  __ mtctr(Rtmp);
  __ bctr();
  // Note: the bcp increment step is part of the individual wide bytecode implementations.
}
#endif // !CC_INTERP