/*
 * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
 * Copyright 2013, 2014 SAP AG. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/interpreterRuntime.hpp"
#include "interpreter/templateInterpreter.hpp"
#include "interpreter/templateTable.hpp"
#include "memory/universe.inline.hpp"
#include "oops/objArrayKlass.hpp"
#include "oops/oop.inline.hpp"
#include "prims/methodHandles.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/synchronizer.hpp"
#include "utilities/macros.hpp"

#ifndef CC_INTERP

#undef __
#define __ _masm->

// ============================================================================
// Misc helpers

// Do an oop store like *(base + index) = val OR *(base + offset) = val
// (only one of the two variants is possible at the same time).
// Index can be noreg.
// Kills:
//   Rbase, Rtmp
static void do_oop_store(InterpreterMacroAssembler* _masm,
                         Register Rbase,
                         RegisterOrConstant offset,
                         Register Rval,   // Noreg means always null.
                         Register Rtmp1,
                         Register Rtmp2,
                         Register Rtmp3,
                         BarrierSet::Name barrier,
                         bool precise,
                         bool check_null) {
  assert_different_registers(Rtmp1, Rtmp2, Rtmp3, Rval, Rbase);

  switch (barrier) {
#if INCLUDE_ALL_GCS
    case BarrierSet::G1SATBCT:
    case BarrierSet::G1SATBCTLogging:
      {
        // Load and record the previous value.
        __ g1_write_barrier_pre(Rbase, offset,
                                Rtmp3, /* holder of pre_val ? */
                                Rtmp1, Rtmp2, false /* frame */);

        Label Lnull, Ldone;
        if (Rval != noreg) {
          if (check_null) {
            __ cmpdi(CCR0, Rval, 0);
            __ beq(CCR0, Lnull);
          }
          __ store_heap_oop_not_null(Rval, offset, Rbase, /*Rval must stay uncompressed.*/ Rtmp1);
          // Mark the card.
          if (!(offset.is_constant() && offset.as_constant() == 0) && precise) {
            __ add(Rbase, offset, Rbase);
          }
          __ g1_write_barrier_post(Rbase, Rval, Rtmp1, Rtmp2, Rtmp3, /*filtered (fast path)*/ &Ldone);
          if (check_null) { __ b(Ldone); }
        }

        if (Rval == noreg || check_null) { // Store null oop.
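          // A null store creates no inter-region reference, so no G1 post
          // barrier (card mark) is needed on this path; only the SATB
          // pre-barrier emitted above matters.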
          Register Rnull = Rval;
          __ bind(Lnull);
          if (Rval == noreg) {
            Rnull = Rtmp1;
            __ li(Rnull, 0);
          }
          if (UseCompressedOops) {
            __ stw(Rnull, offset, Rbase);
          } else {
            __ std(Rnull, offset, Rbase);
          }
        }
        __ bind(Ldone);
      }
      break;
#endif // INCLUDE_ALL_GCS
    case BarrierSet::CardTableModRef:
    case BarrierSet::CardTableExtension:
      {
        Label Lnull, Ldone;
        if (Rval != noreg) {
          if (check_null) {
            __ cmpdi(CCR0, Rval, 0);
            __ beq(CCR0, Lnull);
          }
          __ store_heap_oop_not_null(Rval, offset, Rbase, /*Rval should better stay uncompressed.*/ Rtmp1);
          // Mark the card.
          if (!(offset.is_constant() && offset.as_constant() == 0) && precise) {
            __ add(Rbase, offset, Rbase);
          }
          __ card_write_barrier_post(Rbase, Rval, Rtmp1);
          if (check_null) {
            __ b(Ldone);
          }
        }

        if (Rval == noreg || check_null) { // Store null oop.
          Register Rnull = Rval;
          __ bind(Lnull);
          if (Rval == noreg) {
            Rnull = Rtmp1;
            __ li(Rnull, 0);
          }
          if (UseCompressedOops) {
            __ stw(Rnull, offset, Rbase);
          } else {
            __ std(Rnull, offset, Rbase);
          }
        }
        __ bind(Ldone);
      }
      break;
    case BarrierSet::ModRef:
    case BarrierSet::Other:
      ShouldNotReachHere();
      break;
    default:
      ShouldNotReachHere();
  }
}

// ============================================================================
// Platform-dependent initialization

void TemplateTable::pd_initialize() {
  // No ppc64 specific initialization.
}

Address TemplateTable::at_bcp(int offset) {
  // Not used on ppc.
  ShouldNotReachHere();
  return Address();
}

// Patches the current bytecode (ptr to it located in bcp)
// in the bytecode stream with a new one.
void TemplateTable::patch_bytecode(Bytecodes::Code new_bc, Register Rnew_bc, Register Rtemp, bool load_bc_into_bc_reg /*=true*/, int byte_no) {
  // With sharing on, may need to test method flag.
  if (!RewriteBytecodes) return;
  Label L_patch_done;

  switch (new_bc) {
    case Bytecodes::_fast_aputfield:
    case Bytecodes::_fast_bputfield:
    case Bytecodes::_fast_zputfield:
    case Bytecodes::_fast_cputfield:
    case Bytecodes::_fast_dputfield:
    case Bytecodes::_fast_fputfield:
    case Bytecodes::_fast_iputfield:
    case Bytecodes::_fast_lputfield:
    case Bytecodes::_fast_sputfield:
    {
      // We skip bytecode quickening for putfield instructions when
      // the put_code written to the constant pool cache is zero.
      // This is required so that every execution of this instruction
      // calls out to InterpreterRuntime::resolve_get_put to do
      // additional, required work.
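      // A zero put_code in the ConstantPoolCacheEntry thus serves as the
      // "not yet resolved" sentinel; the lbz below extracts exactly that
      // byte from the entry's indices field.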
      assert(byte_no == f1_byte || byte_no == f2_byte, "byte_no out of range");
      assert(load_bc_into_bc_reg, "we use bc_reg as temp");
      __ get_cache_and_index_at_bcp(Rtemp /* dst = cache */, 1);
      // ((*(cache+indices))>>((1+byte_no)*8))&0xFF:
#if defined(VM_LITTLE_ENDIAN)
      __ lbz(Rnew_bc, in_bytes(ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::indices_offset()) + 1 + byte_no, Rtemp);
#else
      __ lbz(Rnew_bc, in_bytes(ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::indices_offset()) + 7 - (1 + byte_no), Rtemp);
#endif
      __ cmpwi(CCR0, Rnew_bc, 0);
      __ li(Rnew_bc, (unsigned int)(unsigned char)new_bc);
      __ beq(CCR0, L_patch_done);
      // __ isync(); // acquire not needed
      break;
    }

    default:
      assert(byte_no == -1, "sanity");
      if (load_bc_into_bc_reg) {
        __ li(Rnew_bc, (unsigned int)(unsigned char)new_bc);
      }
  }

  if (JvmtiExport::can_post_breakpoint()) {
    Label L_fast_patch;
    __ lbz(Rtemp, 0, R14_bcp);
    __ cmpwi(CCR0, Rtemp, (unsigned int)(unsigned char)Bytecodes::_breakpoint);
    __ bne(CCR0, L_fast_patch);
    // Perform the quickening, slowly, in the bowels of the breakpoint table.
    __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::set_original_bytecode_at), R19_method, R14_bcp, Rnew_bc);
    __ b(L_patch_done);
    __ bind(L_fast_patch);
  }

  // Patch bytecode.
  __ stb(Rnew_bc, 0, R14_bcp);

  __ bind(L_patch_done);
}

// ============================================================================
// Individual instructions

void TemplateTable::nop() {
  transition(vtos, vtos);
  // Nothing to do.
}

void TemplateTable::shouldnotreachhere() {
  transition(vtos, vtos);
  __ stop("shouldnotreachhere bytecode");
}

void TemplateTable::aconst_null() {
  transition(vtos, atos);
  __ li(R17_tos, 0);
}

void TemplateTable::iconst(int value) {
  transition(vtos, itos);
  assert(value >= -1 && value <= 5, "");
  __ li(R17_tos, value);
}

void TemplateTable::lconst(int value) {
  transition(vtos, ltos);
  assert(value >= -1 && value <= 5, "");
  __ li(R17_tos, value);
}

void TemplateTable::fconst(int value) {
  transition(vtos, ftos);
  static float zero = 0.0;
  static float one  = 1.0;
  static float two  = 2.0;
  switch (value) {
    default: ShouldNotReachHere();
    case 0: {
      int simm16_offset = __ load_const_optimized(R11_scratch1, (address*)&zero, R0, true);
      __ lfs(F15_ftos, simm16_offset, R11_scratch1);
      break;
    }
    case 1: {
      int simm16_offset = __ load_const_optimized(R11_scratch1, (address*)&one, R0, true);
      __ lfs(F15_ftos, simm16_offset, R11_scratch1);
      break;
    }
    case 2: {
      int simm16_offset = __ load_const_optimized(R11_scratch1, (address*)&two, R0, true);
      __ lfs(F15_ftos, simm16_offset, R11_scratch1);
      break;
    }
  }
}

void TemplateTable::dconst(int value) {
  transition(vtos, dtos);
  static double zero = 0.0;
  static double one  = 1.0;
  switch (value) {
    case 0: {
      int simm16_offset = __ load_const_optimized(R11_scratch1, (address*)&zero, R0, true);
      __ lfd(F15_ftos, simm16_offset, R11_scratch1);
      break;
    }
    case 1: {
      int simm16_offset = __ load_const_optimized(R11_scratch1, (address*)&one, R0, true);
      __ lfd(F15_ftos, simm16_offset, R11_scratch1);
      break;
    }
    default: ShouldNotReachHere();
  }
}

void TemplateTable::bipush() {
  transition(vtos, itos);
  __ lbz(R17_tos, 1, R14_bcp);
  __ extsb(R17_tos, R17_tos);
}

void TemplateTable::sipush() {
  transition(vtos, itos);
  __ get_2_byte_integer_at_bcp(1, R17_tos, InterpreterMacroAssembler::Signed);
}

void TemplateTable::ldc(bool wide) {
  Register Rscratch1 = R11_scratch1,
           Rscratch2 = R12_scratch2,
           Rcpool    = R3_ARG1;

  transition(vtos, vtos);
  Label notInt, notClass, exit;

  __ get_cpool_and_tags(Rcpool, Rscratch2); // Set Rscratch2 = &tags.
  if (wide) { // Read index.
    __ get_2_byte_integer_at_bcp(1, Rscratch1, InterpreterMacroAssembler::Unsigned);
  } else {
    __ lbz(Rscratch1, 1, R14_bcp);
  }

  const int base_offset = ConstantPool::header_size() * wordSize;
  const int tags_offset = Array<u1>::base_offset_in_bytes();

  // Get type from tags.
  __ addi(Rscratch2, Rscratch2, tags_offset);
  __ lbzx(Rscratch2, Rscratch2, Rscratch1);

  __ cmpwi(CCR0, Rscratch2, JVM_CONSTANT_UnresolvedClass); // Unresolved class?
  __ cmpwi(CCR1, Rscratch2, JVM_CONSTANT_UnresolvedClassInError); // Unresolved class in error state?
  __ cror(/*CR0 eq*/2, /*CR1 eq*/4+2, /*CR0 eq*/2);

  // Resolved class - need to call vm to get java mirror of the class.
  __ cmpwi(CCR1, Rscratch2, JVM_CONSTANT_Class);
  __ crnor(/*CR0 eq*/2, /*CR1 eq*/4+2, /*CR0 eq*/2); // Neither resolved class nor unresolved case from above?
  __ beq(CCR0, notClass);

  __ li(R4, wide ? 1 : 0);
  call_VM(R17_tos, CAST_FROM_FN_PTR(address, InterpreterRuntime::ldc), R4);
  __ push(atos);
  __ b(exit);

  __ align(32, 12);
  __ bind(notClass);
  __ addi(Rcpool, Rcpool, base_offset);
  __ sldi(Rscratch1, Rscratch1, LogBytesPerWord);
  __ cmpdi(CCR0, Rscratch2, JVM_CONSTANT_Integer);
  __ bne(CCR0, notInt);
  __ lwax(R17_tos, Rcpool, Rscratch1);
  __ push(itos);
  __ b(exit);

  __ align(32, 12);
  __ bind(notInt);
#ifdef ASSERT
  // String and Object are rewritten to fast_aldc
  __ cmpdi(CCR0, Rscratch2, JVM_CONSTANT_Float);
  __ asm_assert_eq("unexpected type", 0x8765);
#endif
  __ lfsx(F15_ftos, Rcpool, Rscratch1);
  __ push(ftos);

  __ align(32, 12);
  __ bind(exit);
}

// Fast path for caching oop constants.
void TemplateTable::fast_aldc(bool wide) {
  transition(vtos, atos);

  int index_size = wide ? sizeof(u2) : sizeof(u1);
  const Register Rscratch = R11_scratch1;
  Label resolved;

  // We are resolved if the resolved reference cache entry contains a
  // non-null object (String, MethodType, etc.)
  __ get_cache_index_at_bcp(Rscratch, 1, index_size); // Load index.
  __ load_resolved_reference_at_index(R17_tos, Rscratch);
  __ cmpdi(CCR0, R17_tos, 0);
  __ bne(CCR0, resolved);
  __ load_const_optimized(R3_ARG1, (int)bytecode());

  address entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_ldc);

  // First time invocation - must resolve first.
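  // call_VM places the VM result in R17_tos (the result register requested
  // below), so the resolved constant is already in TOS at 'resolved'.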
  __ call_VM(R17_tos, entry, R3_ARG1);

  __ align(32, 12);
  __ bind(resolved);
  __ verify_oop(R17_tos);
}

void TemplateTable::ldc2_w() {
  transition(vtos, vtos);
  Label Llong, Lexit;

  Register Rindex = R11_scratch1,
           Rcpool = R12_scratch2,
           Rtag   = R3_ARG1;
  __ get_cpool_and_tags(Rcpool, Rtag);
  __ get_2_byte_integer_at_bcp(1, Rindex, InterpreterMacroAssembler::Unsigned);

  const int base_offset = ConstantPool::header_size() * wordSize;
  const int tags_offset = Array<u1>::base_offset_in_bytes();
  // Get type from tags.
  __ addi(Rcpool, Rcpool, base_offset);
  __ addi(Rtag, Rtag, tags_offset);

  __ lbzx(Rtag, Rtag, Rindex);

  __ sldi(Rindex, Rindex, LogBytesPerWord);
  __ cmpdi(CCR0, Rtag, JVM_CONSTANT_Double);
  __ bne(CCR0, Llong);
  // A double can be placed at word-aligned locations in the constant pool.
  // Check out Conversions.java for an example.
  // Also ConstantPool::header_size() is 20, which makes it very difficult
  // to double-align double on the constant pool. SG, 11/7/97
  __ lfdx(F15_ftos, Rcpool, Rindex);
  __ push(dtos);
  __ b(Lexit);

  __ bind(Llong);
  __ ldx(R17_tos, Rcpool, Rindex);
  __ push(ltos);

  __ bind(Lexit);
}

// Get the locals index located in the bytecode stream at bcp + offset.
void TemplateTable::locals_index(Register Rdst, int offset) {
  __ lbz(Rdst, offset, R14_bcp);
}

void TemplateTable::iload() {
  transition(vtos, itos);

  // Get the local value into tos
  const Register Rindex = R22_tmp2;
  locals_index(Rindex);

  // Rewrite iload,iload pair into fast_iload2
  //         iload,caload pair into fast_icaload
  if (RewriteFrequentPairs) {
    Label Lrewrite, Ldone;
    Register Rnext_byte  = R3_ARG1,
             Rrewrite_to = R6_ARG4,
             Rscratch    = R11_scratch1;

    // get next byte
    __ lbz(Rnext_byte, Bytecodes::length_for(Bytecodes::_iload), R14_bcp);

    // if _iload, wait to rewrite to iload2. We only want to rewrite the
    // last two iloads in a pair. Comparing against fast_iload means that
    // the next bytecode is neither an iload nor a caload, and therefore
    // an iload pair.
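    // Dispatch on the next bytecode: _iload -> leave unpatched for now,
    // _fast_iload -> _fast_iload2, _caload -> _fast_icaload, anything
    // else -> _fast_iload.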
    __ cmpwi(CCR0, Rnext_byte, (unsigned int)(unsigned char)Bytecodes::_iload);
    __ beq(CCR0, Ldone);

    __ cmpwi(CCR1, Rnext_byte, (unsigned int)(unsigned char)Bytecodes::_fast_iload);
    __ li(Rrewrite_to, (unsigned int)(unsigned char)Bytecodes::_fast_iload2);
    __ beq(CCR1, Lrewrite);

    __ cmpwi(CCR0, Rnext_byte, (unsigned int)(unsigned char)Bytecodes::_caload);
    __ li(Rrewrite_to, (unsigned int)(unsigned char)Bytecodes::_fast_icaload);
    __ beq(CCR0, Lrewrite);

    __ li(Rrewrite_to, (unsigned int)(unsigned char)Bytecodes::_fast_iload);

    __ bind(Lrewrite);
    patch_bytecode(Bytecodes::_iload, Rrewrite_to, Rscratch, false);
    __ bind(Ldone);
  }

  __ load_local_int(R17_tos, Rindex, Rindex);
}

// Load 2 integers in a row without dispatching
void TemplateTable::fast_iload2() {
  transition(vtos, itos);

  __ lbz(R3_ARG1, 1, R14_bcp);
  __ lbz(R17_tos, Bytecodes::length_for(Bytecodes::_iload) + 1, R14_bcp);

  __ load_local_int(R3_ARG1, R11_scratch1, R3_ARG1);
  __ load_local_int(R17_tos, R12_scratch2, R17_tos);
  __ push_i(R3_ARG1);
}

void TemplateTable::fast_iload() {
  transition(vtos, itos);
  // Get the local value into tos

  const Register Rindex = R11_scratch1;
  locals_index(Rindex);
  __ load_local_int(R17_tos, Rindex, Rindex);
}

// Load a local variable type long from locals area to TOS cache register.
// Local index resides in bytecodestream.
void TemplateTable::lload() {
  transition(vtos, ltos);

  const Register Rindex = R11_scratch1;
  locals_index(Rindex);
  __ load_local_long(R17_tos, Rindex, Rindex);
}

void TemplateTable::fload() {
  transition(vtos, ftos);

  const Register Rindex = R11_scratch1;
  locals_index(Rindex);
  __ load_local_float(F15_ftos, Rindex, Rindex);
}

void TemplateTable::dload() {
  transition(vtos, dtos);

  const Register Rindex = R11_scratch1;
  locals_index(Rindex);
  __ load_local_double(F15_ftos, Rindex, Rindex);
}

void TemplateTable::aload() {
  transition(vtos, atos);

  const Register Rindex = R11_scratch1;
  locals_index(Rindex);
  __ load_local_ptr(R17_tos, Rindex, Rindex);
}

void TemplateTable::locals_index_wide(Register Rdst) {
  // Offset is 2, not 1, because Lbcp points to wide prefix code.
  __ get_2_byte_integer_at_bcp(2, Rdst, InterpreterMacroAssembler::Unsigned);
}

void TemplateTable::wide_iload() {
  // Get the local value into tos.
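  // Unlike iload, the wide form reads a 16-bit unsigned locals index and is
  // never rewritten to a fast variant.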

  const Register Rindex = R11_scratch1;
  locals_index_wide(Rindex);
  __ load_local_int(R17_tos, Rindex, Rindex);
}

void TemplateTable::wide_lload() {
  transition(vtos, ltos);

  const Register Rindex = R11_scratch1;
  locals_index_wide(Rindex);
  __ load_local_long(R17_tos, Rindex, Rindex);
}

void TemplateTable::wide_fload() {
  transition(vtos, ftos);

  const Register Rindex = R11_scratch1;
  locals_index_wide(Rindex);
  __ load_local_float(F15_ftos, Rindex, Rindex);
}

void TemplateTable::wide_dload() {
  transition(vtos, dtos);

  const Register Rindex = R11_scratch1;
  locals_index_wide(Rindex);
  __ load_local_double(F15_ftos, Rindex, Rindex);
}

void TemplateTable::wide_aload() {
  transition(vtos, atos);

  const Register Rindex = R11_scratch1;
  locals_index_wide(Rindex);
  __ load_local_ptr(R17_tos, Rindex, Rindex);
}

void TemplateTable::iaload() {
  transition(itos, itos);

  const Register Rload_addr = R3_ARG1,
                 Rarray     = R4_ARG2,
                 Rtemp      = R5_ARG3;
  __ index_check(Rarray, R17_tos /* index */, LogBytesPerInt, Rtemp, Rload_addr);
  __ lwa(R17_tos, arrayOopDesc::base_offset_in_bytes(T_INT), Rload_addr);
}

void TemplateTable::laload() {
  transition(itos, ltos);

  const Register Rload_addr = R3_ARG1,
                 Rarray     = R4_ARG2,
                 Rtemp      = R5_ARG3;
  __ index_check(Rarray, R17_tos /* index */, LogBytesPerLong, Rtemp, Rload_addr);
  __ ld(R17_tos, arrayOopDesc::base_offset_in_bytes(T_LONG), Rload_addr);
}

void TemplateTable::faload() {
  transition(itos, ftos);

  const Register Rload_addr = R3_ARG1,
                 Rarray     = R4_ARG2,
                 Rtemp      = R5_ARG3;
  __ index_check(Rarray, R17_tos /* index */, LogBytesPerInt, Rtemp, Rload_addr);
  __ lfs(F15_ftos, arrayOopDesc::base_offset_in_bytes(T_FLOAT), Rload_addr);
}

void TemplateTable::daload() {
  transition(itos, dtos);

  const Register Rload_addr = R3_ARG1,
                 Rarray     = R4_ARG2,
                 Rtemp      = R5_ARG3;
  __ index_check(Rarray, R17_tos /* index */, LogBytesPerLong, Rtemp, Rload_addr);
  __ lfd(F15_ftos, arrayOopDesc::base_offset_in_bytes(T_DOUBLE), Rload_addr);
}

void TemplateTable::aaload() {
  transition(itos, atos);

  // tos: index
  // result tos: element
  const Register Rload_addr = R3_ARG1,
                 Rarray     = R4_ARG2,
                 Rtemp      = R5_ARG3;
  __ index_check(Rarray, R17_tos /* index */, UseCompressedOops ? 2 : LogBytesPerWord, Rtemp, Rload_addr);
  __ load_heap_oop(R17_tos, arrayOopDesc::base_offset_in_bytes(T_OBJECT), Rload_addr);
  __ verify_oop(R17_tos);
  //__ dcbt(R17_tos); // prefetch
}

void TemplateTable::baload() {
  transition(itos, itos);

  const Register Rload_addr = R3_ARG1,
                 Rarray     = R4_ARG2,
                 Rtemp      = R5_ARG3;
  __ index_check(Rarray, R17_tos /* index */, 0, Rtemp, Rload_addr);
  __ lbz(R17_tos, arrayOopDesc::base_offset_in_bytes(T_BYTE), Rload_addr);
  __ extsb(R17_tos, R17_tos);
}

void TemplateTable::caload() {
  transition(itos, itos);

  const Register Rload_addr = R3_ARG1,
                 Rarray     = R4_ARG2,
                 Rtemp      = R5_ARG3;
  __ index_check(Rarray, R17_tos /* index */, LogBytesPerShort, Rtemp, Rload_addr);
  __ lhz(R17_tos, arrayOopDesc::base_offset_in_bytes(T_CHAR), Rload_addr);
}

// Iload followed by caload frequent pair.
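// The fused template re-reads the iload's index byte at bcp + 1, loads the
// int local, and immediately uses it as the array index for the caload part.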
void TemplateTable::fast_icaload() {
  transition(vtos, itos);

  const Register Rload_addr = R3_ARG1,
                 Rarray     = R4_ARG2,
                 Rtemp      = R11_scratch1;

  locals_index(R17_tos);
  __ load_local_int(R17_tos, Rtemp, R17_tos);
  __ index_check(Rarray, R17_tos /* index */, LogBytesPerShort, Rtemp, Rload_addr);
  __ lhz(R17_tos, arrayOopDesc::base_offset_in_bytes(T_CHAR), Rload_addr);
}

void TemplateTable::saload() {
  transition(itos, itos);

  const Register Rload_addr = R11_scratch1,
                 Rarray     = R12_scratch2,
                 Rtemp      = R3_ARG1;
  __ index_check(Rarray, R17_tos /* index */, LogBytesPerShort, Rtemp, Rload_addr);
  __ lha(R17_tos, arrayOopDesc::base_offset_in_bytes(T_SHORT), Rload_addr);
}

void TemplateTable::iload(int n) {
  transition(vtos, itos);

  __ lwz(R17_tos, Interpreter::local_offset_in_bytes(n), R18_locals);
}

void TemplateTable::lload(int n) {
  transition(vtos, ltos);

  __ ld(R17_tos, Interpreter::local_offset_in_bytes(n + 1), R18_locals);
}

void TemplateTable::fload(int n) {
  transition(vtos, ftos);

  __ lfs(F15_ftos, Interpreter::local_offset_in_bytes(n), R18_locals);
}

void TemplateTable::dload(int n) {
  transition(vtos, dtos);

  __ lfd(F15_ftos, Interpreter::local_offset_in_bytes(n + 1), R18_locals);
}

void TemplateTable::aload(int n) {
  transition(vtos, atos);

  __ ld(R17_tos, Interpreter::local_offset_in_bytes(n), R18_locals);
}

void TemplateTable::aload_0() {
  transition(vtos, atos);
  // According to bytecode histograms, the pairs:
  //
  // _aload_0, _fast_igetfield
  // _aload_0, _fast_agetfield
  // _aload_0, _fast_fgetfield
  //
  // occur frequently. If RewriteFrequentPairs is set, the (slow)
  // _aload_0 bytecode checks if the next bytecode is either
  // _fast_igetfield, _fast_agetfield or _fast_fgetfield and then
  // rewrites the current bytecode into a pair bytecode; otherwise it
  // rewrites the current bytecode into _fast_aload_0 that doesn't do
  // the pair check anymore.
  //
  // Note: If the next bytecode is _getfield, the rewrite must be
  // delayed, otherwise we may miss an opportunity for a pair.
  //
  // Also rewrite frequent pairs
  //   aload_0, aload_1
  //   aload_0, iload_1
  // These bytecodes with a small amount of code are most profitable
  // to rewrite.

  if (RewriteFrequentPairs) {

    Label Lrewrite, Ldont_rewrite;
    Register Rnext_byte  = R3_ARG1,
             Rrewrite_to = R6_ARG4,
             Rscratch    = R11_scratch1;

    // Get next byte.
    __ lbz(Rnext_byte, Bytecodes::length_for(Bytecodes::_aload_0), R14_bcp);

    // If _getfield, wait to rewrite. We only want to rewrite the last two bytecodes in a pair.
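    // Note the alternation between CCR0 and CCR1 below; presumably this
    // avoids making every compare target the same condition register field.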
    __ cmpwi(CCR0, Rnext_byte, (unsigned int)(unsigned char)Bytecodes::_getfield);
    __ beq(CCR0, Ldont_rewrite);

    __ cmpwi(CCR1, Rnext_byte, (unsigned int)(unsigned char)Bytecodes::_fast_igetfield);
    __ li(Rrewrite_to, (unsigned int)(unsigned char)Bytecodes::_fast_iaccess_0);
    __ beq(CCR1, Lrewrite);

    __ cmpwi(CCR0, Rnext_byte, (unsigned int)(unsigned char)Bytecodes::_fast_agetfield);
    __ li(Rrewrite_to, (unsigned int)(unsigned char)Bytecodes::_fast_aaccess_0);
    __ beq(CCR0, Lrewrite);

    __ cmpwi(CCR1, Rnext_byte, (unsigned int)(unsigned char)Bytecodes::_fast_fgetfield);
    __ li(Rrewrite_to, (unsigned int)(unsigned char)Bytecodes::_fast_faccess_0);
    __ beq(CCR1, Lrewrite);

    __ li(Rrewrite_to, (unsigned int)(unsigned char)Bytecodes::_fast_aload_0);

    __ bind(Lrewrite);
    patch_bytecode(Bytecodes::_aload_0, Rrewrite_to, Rscratch, false);
    __ bind(Ldont_rewrite);
  }

  // Do actual aload_0 (must do this after patch_bytecode which might call VM and GC might change oop).
  aload(0);
}

void TemplateTable::istore() {
  transition(itos, vtos);

  const Register Rindex = R11_scratch1;
  locals_index(Rindex);
  __ store_local_int(R17_tos, Rindex);
}

void TemplateTable::lstore() {
  transition(ltos, vtos);
  const Register Rindex = R11_scratch1;
  locals_index(Rindex);
  __ store_local_long(R17_tos, Rindex);
}

void TemplateTable::fstore() {
  transition(ftos, vtos);

  const Register Rindex = R11_scratch1;
  locals_index(Rindex);
  __ store_local_float(F15_ftos, Rindex);
}

void TemplateTable::dstore() {
  transition(dtos, vtos);

  const Register Rindex = R11_scratch1;
  locals_index(Rindex);
  __ store_local_double(F15_ftos, Rindex);
}

void TemplateTable::astore() {
  transition(vtos, vtos);

  const Register Rindex = R11_scratch1;
  __ pop_ptr();
  __ verify_oop_or_return_address(R17_tos, Rindex);
  locals_index(Rindex);
  __ store_local_ptr(R17_tos, Rindex);
}

void TemplateTable::wide_istore() {
  transition(vtos, vtos);

  const Register Rindex = R11_scratch1;
  __ pop_i();
  locals_index_wide(Rindex);
  __ store_local_int(R17_tos, Rindex);
}

void TemplateTable::wide_lstore() {
  transition(vtos, vtos);

  const Register Rindex = R11_scratch1;
  __ pop_l();
  locals_index_wide(Rindex);
  __ store_local_long(R17_tos, Rindex);
}

void TemplateTable::wide_fstore() {
  transition(vtos, vtos);

  const Register Rindex = R11_scratch1;
  __ pop_f();
  locals_index_wide(Rindex);
  __ store_local_float(F15_ftos, Rindex);
}

void TemplateTable::wide_dstore() {
  transition(vtos, vtos);

  const Register Rindex = R11_scratch1;
  __ pop_d();
  locals_index_wide(Rindex);
  __ store_local_double(F15_ftos, Rindex);
}

void TemplateTable::wide_astore() {
  transition(vtos, vtos);

  const Register Rindex = R11_scratch1;
  __ pop_ptr();
  __ verify_oop_or_return_address(R17_tos, Rindex);
  locals_index_wide(Rindex);
  __ store_local_ptr(R17_tos, Rindex);
}

void TemplateTable::iastore() {
  transition(itos, vtos);

  const Register Rindex      = R3_ARG1,
                 Rstore_addr = R4_ARG2,
                 Rarray      = R5_ARG3,
                 Rtemp       = R6_ARG4;
  __ pop_i(Rindex);
  __ index_check(Rarray, Rindex, LogBytesPerInt, Rtemp, Rstore_addr);
  __ stw(R17_tos, arrayOopDesc::base_offset_in_bytes(T_INT), Rstore_addr);
}

void TemplateTable::lastore() {
  transition(ltos, vtos);

  const Register Rindex      = R3_ARG1,
                 Rstore_addr = R4_ARG2,
                 Rarray      = R5_ARG3,
                 Rtemp       = R6_ARG4;
  __ pop_i(Rindex);
  __ index_check(Rarray, Rindex, LogBytesPerLong, Rtemp, Rstore_addr);
  __ std(R17_tos, arrayOopDesc::base_offset_in_bytes(T_LONG), Rstore_addr);
}

void TemplateTable::fastore() {
  transition(ftos, vtos);

  const Register Rindex      = R3_ARG1,
                 Rstore_addr = R4_ARG2,
                 Rarray      = R5_ARG3,
                 Rtemp       = R6_ARG4;
  __ pop_i(Rindex);
  __ index_check(Rarray, Rindex, LogBytesPerInt, Rtemp, Rstore_addr);
  __ stfs(F15_ftos, arrayOopDesc::base_offset_in_bytes(T_FLOAT), Rstore_addr);
}

void TemplateTable::dastore() {
  transition(dtos, vtos);

  const Register Rindex      = R3_ARG1,
                 Rstore_addr = R4_ARG2,
                 Rarray      = R5_ARG3,
                 Rtemp       = R6_ARG4;
  __ pop_i(Rindex);
  __ index_check(Rarray, Rindex, LogBytesPerLong, Rtemp, Rstore_addr);
  __ stfd(F15_ftos, arrayOopDesc::base_offset_in_bytes(T_DOUBLE), Rstore_addr);
}

// Pop 3 values from the stack and...
void TemplateTable::aastore() {
  transition(vtos, vtos);

  Label Lstore_ok, Lis_null, Ldone;
  const Register Rindex    = R3_ARG1,
                 Rarray    = R4_ARG2,
                 Rscratch  = R11_scratch1,
                 Rscratch2 = R12_scratch2,
                 Rarray_klass = R5_ARG3,
                 Rarray_element_klass = Rarray_klass,
                 Rvalue_klass = R6_ARG4,
                 Rstore_addr = R31; // Use register which survives VM call.

  __ ld(R17_tos, Interpreter::expr_offset_in_bytes(0), R15_esp); // Get value to store.
  __ lwz(Rindex, Interpreter::expr_offset_in_bytes(1), R15_esp); // Get index.
  __ ld(Rarray, Interpreter::expr_offset_in_bytes(2), R15_esp);  // Get array.

  __ verify_oop(R17_tos);
  __ index_check_without_pop(Rarray, Rindex, UseCompressedOops ? 2 : LogBytesPerWord, Rscratch, Rstore_addr);
  // Rindex is dead!
  Register Rscratch3 = Rindex;

  // Do array store check - check for NULL value first.
  __ cmpdi(CCR0, R17_tos, 0);
  __ beq(CCR0, Lis_null);

  __ load_klass(Rarray_klass, Rarray);
  __ load_klass(Rvalue_klass, R17_tos);

  // Do fast instanceof cache test.
  __ ld(Rarray_element_klass, in_bytes(ObjArrayKlass::element_klass_offset()), Rarray_klass);

  // Generate a fast subtype check. Branch to store_ok if no failure. Throw if failure.
  __ gen_subtype_check(Rvalue_klass /*subklass*/, Rarray_element_klass /*superklass*/, Rscratch, Rscratch2, Rscratch3, Lstore_ok);

  // Fell through: subtype check failed => throw an exception.
  __ load_dispatch_table(R11_scratch1, (address*)Interpreter::_throw_ArrayStoreException_entry);
  __ mtctr(R11_scratch1);
  __ bctr();

  __ bind(Lis_null);
  do_oop_store(_masm, Rstore_addr, arrayOopDesc::base_offset_in_bytes(T_OBJECT), noreg /* 0 */,
               Rscratch, Rscratch2, Rscratch3, _bs->kind(), true /* precise */, false /* check_null */);
  __ profile_null_seen(Rscratch, Rscratch2);
  __ b(Ldone);

  // Store is OK.
  __ bind(Lstore_ok);
  do_oop_store(_masm, Rstore_addr, arrayOopDesc::base_offset_in_bytes(T_OBJECT), R17_tos /* value */,
               Rscratch, Rscratch2, Rscratch3, _bs->kind(), true /* precise */, false /* check_null */);

  __ bind(Ldone);
  // Adjust sp (pops array, index and value).
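  // The operands were only read, never popped, above (index_check_without_pop),
  // so the ArrayStoreException path still saw a consistent expression stack.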
  __ addi(R15_esp, R15_esp, 3 * Interpreter::stackElementSize);
}

void TemplateTable::bastore() {
  transition(itos, vtos);

  const Register Rindex   = R11_scratch1,
                 Rarray   = R12_scratch2,
                 Rscratch = R3_ARG1;
  __ pop_i(Rindex);
  __ pop_ptr(Rarray);
  // tos: val

  // Need to check whether array is boolean or byte
  // since both types share the bastore bytecode.
  __ load_klass(Rscratch, Rarray);
  __ lwz(Rscratch, in_bytes(Klass::layout_helper_offset()), Rscratch);
  int diffbit = exact_log2(Klass::layout_helper_boolean_diffbit());
  __ testbitdi(CCR0, R0, Rscratch, diffbit);
  Label L_skip;
  __ bfalse(CCR0, L_skip);
  __ andi(R17_tos, R17_tos, 1); // if it is a T_BOOLEAN array, mask the stored value to 0/1
  __ bind(L_skip);

  __ index_check_without_pop(Rarray, Rindex, 0, Rscratch, Rarray);
  __ stb(R17_tos, arrayOopDesc::base_offset_in_bytes(T_BYTE), Rarray);
}

void TemplateTable::castore() {
  transition(itos, vtos);

  const Register Rindex   = R11_scratch1,
                 Rarray   = R12_scratch2,
                 Rscratch = R3_ARG1;
  __ pop_i(Rindex);
  // tos: val
  // Rarray: array ptr (popped by index_check)
  __ index_check(Rarray, Rindex, LogBytesPerShort, Rscratch, Rarray);
  __ sth(R17_tos, arrayOopDesc::base_offset_in_bytes(T_CHAR), Rarray);
}

void TemplateTable::sastore() {
  castore();
}

void TemplateTable::istore(int n) {
  transition(itos, vtos);
  __ stw(R17_tos, Interpreter::local_offset_in_bytes(n), R18_locals);
}

void TemplateTable::lstore(int n) {
  transition(ltos, vtos);
  __ std(R17_tos, Interpreter::local_offset_in_bytes(n + 1), R18_locals);
}

void TemplateTable::fstore(int n) {
  transition(ftos, vtos);
  __ stfs(F15_ftos, Interpreter::local_offset_in_bytes(n), R18_locals);
}

void TemplateTable::dstore(int n) {
  transition(dtos, vtos);
  __ stfd(F15_ftos, Interpreter::local_offset_in_bytes(n + 1), R18_locals);
}

void TemplateTable::astore(int n) {
  transition(vtos, vtos);

  __ pop_ptr();
  __ verify_oop_or_return_address(R17_tos, R11_scratch1);
  __ std(R17_tos, Interpreter::local_offset_in_bytes(n), R18_locals);
}

void TemplateTable::pop() {
  transition(vtos, vtos);

  __ addi(R15_esp, R15_esp, Interpreter::stackElementSize);
}

void TemplateTable::pop2() {
  transition(vtos, vtos);

  __ addi(R15_esp, R15_esp, Interpreter::stackElementSize * 2);
}

void TemplateTable::dup() {
  transition(vtos, vtos);

  __ ld(R11_scratch1, Interpreter::stackElementSize, R15_esp);
  __ push_ptr(R11_scratch1);
}

void TemplateTable::dup_x1() {
  transition(vtos, vtos);

  Register Ra = R11_scratch1,
           Rb = R12_scratch2;
  // stack: ..., a, b
  __ ld(Rb, Interpreter::stackElementSize, R15_esp);
  __ ld(Ra, Interpreter::stackElementSize * 2, R15_esp);
  __ std(Rb, Interpreter::stackElementSize * 2, R15_esp);
  __ std(Ra, Interpreter::stackElementSize, R15_esp);
  __ push_ptr(Rb);
  // stack: ..., b, a, b
}

void TemplateTable::dup_x2() {
  transition(vtos, vtos);

  Register Ra = R11_scratch1,
           Rb = R12_scratch2,
           Rc = R3_ARG1;

  // stack: ..., a, b, c
  __ ld(Rc, Interpreter::stackElementSize, R15_esp);     // load c
  __ ld(Ra, Interpreter::stackElementSize * 3, R15_esp); // load a
  __ std(Rc, Interpreter::stackElementSize * 3, R15_esp); // store c in a
  __ ld(Rb, Interpreter::stackElementSize * 2, R15_esp);  // load b
  // stack: ..., c, b, c
  __ std(Ra, Interpreter::stackElementSize * 2, R15_esp); // store a in b
  // stack: ..., c, a, c
  __ std(Rb, Interpreter::stackElementSize, R15_esp);     // store b in c
  __ push_ptr(Rc);                                        // push c
  // stack: ..., c, a, b, c
}

void TemplateTable::dup2() {
  transition(vtos, vtos);

  Register Ra = R11_scratch1,
           Rb = R12_scratch2;
  // stack: ..., a, b
  __ ld(Rb, Interpreter::stackElementSize, R15_esp);
  __ ld(Ra, Interpreter::stackElementSize * 2, R15_esp);
  __ push_2ptrs(Ra, Rb);
  // stack: ..., a, b, a, b
}

void TemplateTable::dup2_x1() {
  transition(vtos, vtos);

  Register Ra = R11_scratch1,
           Rb = R12_scratch2,
           Rc = R3_ARG1;
  // stack: ..., a, b, c
  __ ld(Rc, Interpreter::stackElementSize, R15_esp);
  __ ld(Rb, Interpreter::stackElementSize * 2, R15_esp);
  __ std(Rc, Interpreter::stackElementSize * 2, R15_esp);
  __ ld(Ra, Interpreter::stackElementSize * 3, R15_esp);
  __ std(Ra, Interpreter::stackElementSize, R15_esp);
  __ std(Rb, Interpreter::stackElementSize * 3, R15_esp);
  // stack: ..., b, c, a
  __ push_2ptrs(Rb, Rc);
  // stack: ..., b, c, a, b, c
}

void TemplateTable::dup2_x2() {
  transition(vtos, vtos);

  Register Ra = R11_scratch1,
           Rb = R12_scratch2,
           Rc = R3_ARG1,
           Rd = R4_ARG2;
  // stack: ..., a, b, c, d
  __ ld(Rb, Interpreter::stackElementSize * 3, R15_esp);
  __ ld(Rd, Interpreter::stackElementSize, R15_esp);
  __ std(Rb, Interpreter::stackElementSize, R15_esp);     // store b in d
  __ std(Rd, Interpreter::stackElementSize * 3, R15_esp); // store d in b
  __ ld(Ra, Interpreter::stackElementSize * 4, R15_esp);
  __ ld(Rc, Interpreter::stackElementSize * 2, R15_esp);
  __ std(Ra, Interpreter::stackElementSize * 2, R15_esp); // store a in c
  __ std(Rc, Interpreter::stackElementSize * 4, R15_esp); // store c in a
  // stack: ..., c, d, a, b
  __ push_2ptrs(Rc, Rd);
  // stack: ..., c, d, a, b, c, d
}

void TemplateTable::swap() {
  transition(vtos, vtos);
  // stack: ..., a, b

  Register Ra = R11_scratch1,
           Rb = R12_scratch2;
  // stack: ..., a, b
  __ ld(Rb, Interpreter::stackElementSize, R15_esp);
  __ ld(Ra, Interpreter::stackElementSize * 2, R15_esp);
  __ std(Rb, Interpreter::stackElementSize * 2, R15_esp);
  __ std(Ra, Interpreter::stackElementSize, R15_esp);
  // stack: ..., b, a
}

void TemplateTable::iop2(Operation op) {
  transition(itos, itos);

  Register Rscratch = R11_scratch1;

  __ pop_i(Rscratch);
  // tos      = number of bits to shift
  // Rscratch = value to shift
  switch (op) {
    case  add:  __ add(R17_tos, Rscratch, R17_tos); break;
    case  sub:  __ sub(R17_tos, Rscratch, R17_tos); break;
    case  mul:  __ mullw(R17_tos, Rscratch, R17_tos); break;
    case  _and: __ andr(R17_tos, Rscratch, R17_tos); break;
    case  _or:  __ orr(R17_tos, Rscratch, R17_tos); break;
    case  _xor: __ xorr(R17_tos, Rscratch, R17_tos); break;
    case  shl:  __ rldicl(R17_tos, R17_tos, 0, 64-5); __ slw(R17_tos, Rscratch, R17_tos); break;
    case  shr:  __ rldicl(R17_tos, R17_tos, 0, 64-5); __ sraw(R17_tos, Rscratch, R17_tos); break;
    case  ushr: __ rldicl(R17_tos, R17_tos, 0, 64-5); __ srw(R17_tos, Rscratch, R17_tos); break;
    default: ShouldNotReachHere();
  }
}

void TemplateTable::lop2(Operation op) {
  transition(ltos, ltos);

  Register Rscratch = R11_scratch1;
  __ pop_l(Rscratch);
  switch (op) {
    case  add:  __ add(R17_tos, Rscratch, R17_tos); break;
    case  sub:  __ sub(R17_tos, Rscratch, R17_tos); break;
    case  _and: __ andr(R17_tos, Rscratch, R17_tos); break;
    case  _or:  __ orr(R17_tos, Rscratch, R17_tos); break;
    case  _xor: __ xorr(R17_tos, Rscratch, R17_tos); break;
    default: ShouldNotReachHere();
  }
}

void TemplateTable::idiv() {
  transition(itos, itos);

  Label Lnormal, Lexception, Ldone;
  Register Rdividend = R11_scratch1; // Used by irem.

  __ addi(R0, R17_tos, 1);
  __ cmplwi(CCR0, R0, 2);
  __ bgt(CCR0, Lnormal); // divisor <-1 or >1

  __ cmpwi(CCR1, R17_tos, 0);
  __ beq(CCR1, Lexception); // divisor == 0

  __ pop_i(Rdividend);
  __ mullw(R17_tos, Rdividend, R17_tos); // div by +/-1
  __ b(Ldone);

  __ bind(Lexception);
  __ load_dispatch_table(R11_scratch1, (address*)Interpreter::_throw_ArithmeticException_entry);
  __ mtctr(R11_scratch1);
  __ bctr();

  __ align(32, 12);
  __ bind(Lnormal);
  __ pop_i(Rdividend);
  __ divw(R17_tos, Rdividend, R17_tos); // Can't divide minint/-1.
  __ bind(Ldone);
}

void TemplateTable::irem() {
  transition(itos, itos);

  __ mr(R12_scratch2, R17_tos);
  idiv();
  __ mullw(R17_tos, R17_tos, R12_scratch2);
  __ subf(R17_tos, R17_tos, R11_scratch1); // Dividend set by idiv.
}

void TemplateTable::lmul() {
  transition(ltos, ltos);

  __ pop_l(R11_scratch1);
  __ mulld(R17_tos, R11_scratch1, R17_tos);
}

void TemplateTable::ldiv() {
  transition(ltos, ltos);

  Label Lnormal, Lexception, Ldone;
  Register Rdividend = R11_scratch1; // Used by lrem.

  __ addi(R0, R17_tos, 1);
  __ cmpldi(CCR0, R0, 2);
  __ bgt(CCR0, Lnormal); // divisor <-1 or >1

  __ cmpdi(CCR1, R17_tos, 0);
  __ beq(CCR1, Lexception); // divisor == 0

  __ pop_l(Rdividend);
  __ mulld(R17_tos, Rdividend, R17_tos); // div by +/-1
  __ b(Ldone);

  __ bind(Lexception);
  __ load_dispatch_table(R11_scratch1, (address*)Interpreter::_throw_ArithmeticException_entry);
  __ mtctr(R11_scratch1);
  __ bctr();

  __ align(32, 12);
  __ bind(Lnormal);
  __ pop_l(Rdividend);
  __ divd(R17_tos, Rdividend, R17_tos); // Can't divide minint/-1.
  __ bind(Ldone);
}

void TemplateTable::lrem() {
  transition(ltos, ltos);

  __ mr(R12_scratch2, R17_tos);
  ldiv();
  __ mulld(R17_tos, R17_tos, R12_scratch2);
  __ subf(R17_tos, R17_tos, R11_scratch1); // Dividend set by ldiv.
}

void TemplateTable::lshl() {
  transition(itos, ltos);

  __ rldicl(R17_tos, R17_tos, 0, 64-6); // Extract least significant bits.
  __ pop_l(R11_scratch1);
  __ sld(R17_tos, R11_scratch1, R17_tos);
}

void TemplateTable::lshr() {
  transition(itos, ltos);

  __ rldicl(R17_tos, R17_tos, 0, 64-6); // Extract least significant bits.
  __ pop_l(R11_scratch1);
  __ srad(R17_tos, R11_scratch1, R17_tos);
}

void TemplateTable::lushr() {
  transition(itos, ltos);

  __ rldicl(R17_tos, R17_tos, 0, 64-6); // Extract least significant bits.
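  // Long shifts mask the count to 6 bits (64-6 above) while the int shifts
  // in iop2 mask to 5 bits (64-5), matching the JVM spec's shift-distance rules.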
  __ pop_l(R11_scratch1);
  __ srd(R17_tos, R11_scratch1, R17_tos);
}

void TemplateTable::fop2(Operation op) {
  transition(ftos, ftos);

  switch (op) {
    case add: __ pop_f(F0_SCRATCH); __ fadds(F15_ftos, F0_SCRATCH, F15_ftos); break;
    case sub: __ pop_f(F0_SCRATCH); __ fsubs(F15_ftos, F0_SCRATCH, F15_ftos); break;
    case mul: __ pop_f(F0_SCRATCH); __ fmuls(F15_ftos, F0_SCRATCH, F15_ftos); break;
    case div: __ pop_f(F0_SCRATCH); __ fdivs(F15_ftos, F0_SCRATCH, F15_ftos); break;
    case rem:
      __ pop_f(F1_ARG1);
      __ fmr(F2_ARG2, F15_ftos);
      __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::frem));
      __ fmr(F15_ftos, F1_RET);
      break;

    default: ShouldNotReachHere();
  }
}

void TemplateTable::dop2(Operation op) {
  transition(dtos, dtos);

  switch (op) {
    case add: __ pop_d(F0_SCRATCH); __ fadd(F15_ftos, F0_SCRATCH, F15_ftos); break;
    case sub: __ pop_d(F0_SCRATCH); __ fsub(F15_ftos, F0_SCRATCH, F15_ftos); break;
    case mul: __ pop_d(F0_SCRATCH); __ fmul(F15_ftos, F0_SCRATCH, F15_ftos); break;
    case div: __ pop_d(F0_SCRATCH); __ fdiv(F15_ftos, F0_SCRATCH, F15_ftos); break;
    case rem:
      __ pop_d(F1_ARG1);
      __ fmr(F2_ARG2, F15_ftos);
      __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::drem));
      __ fmr(F15_ftos, F1_RET);
      break;

    default: ShouldNotReachHere();
  }
}

// Negate the value in the TOS cache.
void TemplateTable::ineg() {
  transition(itos, itos);

  __ neg(R17_tos, R17_tos);
}

// Negate the value in the TOS cache.
void TemplateTable::lneg() {
  transition(ltos, ltos);

  __ neg(R17_tos, R17_tos);
}

void TemplateTable::fneg() {
  transition(ftos, ftos);

  __ fneg(F15_ftos, F15_ftos);
}

void TemplateTable::dneg() {
  transition(dtos, dtos);

  __ fneg(F15_ftos, F15_ftos);
}

// Increments a local variable in place.
void TemplateTable::iinc() {
  transition(vtos, vtos);

  const Register Rindex     = R11_scratch1,
                 Rincrement = R0,
                 Rvalue     = R12_scratch2;

  locals_index(Rindex);           // Load locals index from bytecode stream.
  __ lbz(Rincrement, 2, R14_bcp); // Load increment from the bytecode stream.
  __ extsb(Rincrement, Rincrement);

  __ load_local_int(Rvalue, Rindex, Rindex); // Puts address of local into Rindex.
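  // Since Rindex now holds the address of the local, the incremented value
  // can be stored back directly without recomputing the locals address.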

  __ add(Rvalue, Rincrement, Rvalue);
  __ stw(Rvalue, 0, Rindex);
}

void TemplateTable::wide_iinc() {
  transition(vtos, vtos);

  Register Rindex       = R11_scratch1,
           Rlocals_addr = Rindex,
           Rincr        = R12_scratch2;
  locals_index_wide(Rindex);
  __ get_2_byte_integer_at_bcp(4, Rincr, InterpreterMacroAssembler::Signed);
  __ load_local_int(R17_tos, Rlocals_addr, Rindex);
  __ add(R17_tos, Rincr, R17_tos);
  __ stw(R17_tos, 0, Rlocals_addr);
}

void TemplateTable::convert() {
  // %%%%% Factor this first part across platforms
#ifdef ASSERT
  TosState tos_in  = ilgl;
  TosState tos_out = ilgl;
  switch (bytecode()) {
    case Bytecodes::_i2l: // fall through
    case Bytecodes::_i2f: // fall through
    case Bytecodes::_i2d: // fall through
    case Bytecodes::_i2b: // fall through
    case Bytecodes::_i2c: // fall through
    case Bytecodes::_i2s: tos_in = itos; break;
    case Bytecodes::_l2i: // fall through
    case Bytecodes::_l2f: // fall through
    case Bytecodes::_l2d: tos_in = ltos; break;
    case Bytecodes::_f2i: // fall through
    case Bytecodes::_f2l: // fall through
    case Bytecodes::_f2d: tos_in = ftos; break;
    case Bytecodes::_d2i: // fall through
    case Bytecodes::_d2l: // fall through
    case Bytecodes::_d2f: tos_in = dtos; break;
    default : ShouldNotReachHere();
  }
  switch (bytecode()) {
    case Bytecodes::_l2i: // fall through
    case Bytecodes::_f2i: // fall through
    case Bytecodes::_d2i: // fall through
    case Bytecodes::_i2b: // fall through
    case Bytecodes::_i2c: // fall through
    case Bytecodes::_i2s: tos_out = itos; break;
    case Bytecodes::_i2l: // fall through
    case Bytecodes::_f2l: // fall through
    case Bytecodes::_d2l: tos_out = ltos; break;
    case Bytecodes::_i2f: // fall through
    case Bytecodes::_l2f: // fall through
    case Bytecodes::_d2f: tos_out = ftos; break;
    case Bytecodes::_i2d: // fall through
    case Bytecodes::_l2d: // fall through
    case Bytecodes::_f2d: tos_out = dtos; break;
    default : ShouldNotReachHere();
  }
  transition(tos_in, tos_out);
#endif

  // Conversion
  Label done;
  switch (bytecode()) {
    case Bytecodes::_i2l:
      __ extsw(R17_tos, R17_tos);
      break;

    case Bytecodes::_l2i:
      // Nothing to do, we'll continue to work with the lower bits.
      break;

    case Bytecodes::_i2b:
      __ extsb(R17_tos, R17_tos);
      break;

    case Bytecodes::_i2c:
      __ rldicl(R17_tos, R17_tos, 0, 64-2*8);
      break;

    case Bytecodes::_i2s:
      __ extsh(R17_tos, R17_tos);
      break;

    case Bytecodes::_i2d:
      __ extsw(R17_tos, R17_tos);
    case Bytecodes::_l2d:
      __ push_l_pop_d();
      __ fcfid(F15_ftos, F15_ftos);
      break;

    case Bytecodes::_i2f:
      __ extsw(R17_tos, R17_tos);
      __ push_l_pop_d();
      if (VM_Version::has_fcfids()) { // fcfids is >= Power7 only
        // Comment: alternatively, load with sign extend could be done by lfiwax.
        __ fcfids(F15_ftos, F15_ftos);
      } else {
        __ fcfid(F15_ftos, F15_ftos);
        __ frsp(F15_ftos, F15_ftos);
      }
      break;

    case Bytecodes::_l2f:
      if (VM_Version::has_fcfids()) { // fcfids is >= Power7 only
        __ push_l_pop_d();
        __ fcfids(F15_ftos, F15_ftos);
      } else {
        // Avoid rounding problem when result should be 0x3f800001: need fixup code before fcfid+frsp.
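        // Converting long->double->float rounds twice and can come out one
        // ulp off, hence the call into SharedRuntime::l2f below.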
        __ mr(R3_ARG1, R17_tos);
        __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::l2f));
        __ fmr(F15_ftos, F1_RET);
      }
      break;

    case Bytecodes::_f2d:
      // empty
      break;

    case Bytecodes::_d2f:
      __ frsp(F15_ftos, F15_ftos);
      break;

    case Bytecodes::_d2i:
    case Bytecodes::_f2i:
      __ fcmpu(CCR0, F15_ftos, F15_ftos);
      __ li(R17_tos, 0); // 0 in case of NAN
      __ bso(CCR0, done);
      __ fctiwz(F15_ftos, F15_ftos);
      __ push_d_pop_l();
      break;

    case Bytecodes::_d2l:
    case Bytecodes::_f2l:
      __ fcmpu(CCR0, F15_ftos, F15_ftos);
      __ li(R17_tos, 0); // 0 in case of NAN
      __ bso(CCR0, done);
      __ fctidz(F15_ftos, F15_ftos);
      __ push_d_pop_l();
      break;

    default: ShouldNotReachHere();
  }
  __ bind(done);
}

// Long compare
void TemplateTable::lcmp() {
  transition(ltos, itos);

  const Register Rscratch = R11_scratch1;
  __ pop_l(Rscratch); // first operand, deeper in stack

  __ cmpd(CCR0, Rscratch, R17_tos); // compare
  __ mfcr(R17_tos); // set bit 32..33 as follows: <: 0b10, =: 0b00, >: 0b01
  __ srwi(Rscratch, R17_tos, 30);
  __ srawi(R17_tos, R17_tos, 31);
  __ orr(R17_tos, Rscratch, R17_tos); // set result as follows: <: -1, =: 0, >: 1
}

// fcmpl/fcmpg and dcmpl/dcmpg bytecodes
// unordered_result == -1 => fcmpl or dcmpl
// unordered_result == 1  => fcmpg or dcmpg
void TemplateTable::float_cmp(bool is_float, int unordered_result) {
  const FloatRegister Rfirst  = F0_SCRATCH,
                      Rsecond = F15_ftos;
  const Register Rscratch = R11_scratch1;

  if (is_float) {
    __ pop_f(Rfirst);
  } else {
    __ pop_d(Rfirst);
  }

  Label Lunordered, Ldone;
  __ fcmpu(CCR0, Rfirst, Rsecond); // compare
  if (unordered_result) {
    __ bso(CCR0, Lunordered);
  }
  __ mfcr(R17_tos); // set bit 32..33 as follows: <: 0b10, =: 0b00, >: 0b01
  __ srwi(Rscratch, R17_tos, 30);
  __ srawi(R17_tos, R17_tos, 31);
  __ orr(R17_tos, Rscratch, R17_tos); // set result as follows: <: -1, =: 0, >: 1
  if (unordered_result) {
    __ b(Ldone);
    __ bind(Lunordered);
    __ load_const_optimized(R17_tos, unordered_result);
  }
  __ bind(Ldone);
}

// Branch_conditional which takes TemplateTable::Condition.
void TemplateTable::branch_conditional(ConditionRegister crx, TemplateTable::Condition cc, Label& L, bool invert) {
  bool positive = false;
  Assembler::Condition cond = Assembler::equal;
  switch (cc) {
    case TemplateTable::equal:         positive = true ; cond = Assembler::equal  ; break;
    case TemplateTable::not_equal:     positive = false; cond = Assembler::equal  ; break;
    case TemplateTable::less:          positive = true ; cond = Assembler::less   ; break;
    case TemplateTable::less_equal:    positive = false; cond = Assembler::greater; break;
    case TemplateTable::greater:       positive = true ; cond = Assembler::greater; break;
    case TemplateTable::greater_equal: positive = false; cond = Assembler::less   ; break;
    default: ShouldNotReachHere();
  }
  int bo = (positive != invert) ? Assembler::bcondCRbiIs1 : Assembler::bcondCRbiIs0;
  int bi = Assembler::bi0(crx, cond);
  __ bc(bo, bi, L);
}

void TemplateTable::branch(bool is_jsr, bool is_wide) {

  // Note: on SPARC, we use InterpreterMacroAssembler::if_cmp also.
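  // Overview: JSR pushes the return bci and takes the branch; backward
  // branches bump the backedge counter (in the MDO if present) and may
  // trigger OSR; forward branches just add the displacement to R14_bcp.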
  __ verify_thread();

  const Register Rscratch1    = R11_scratch1,
                 Rscratch2    = R12_scratch2,
                 Rscratch3    = R3_ARG1,
                 R4_counters  = R4_ARG2,
                 bumped_count = R31,
                 Rdisp        = R22_tmp2;

  __ profile_taken_branch(Rscratch1, bumped_count);

  // Get (wide) offset.
  if (is_wide) {
    __ get_4_byte_integer_at_bcp(1, Rdisp, InterpreterMacroAssembler::Signed);
  } else {
    __ get_2_byte_integer_at_bcp(1, Rdisp, InterpreterMacroAssembler::Signed);
  }

  // --------------------------------------------------------------------------
  // Handle all the JSR stuff here, then exit.
  // It's much shorter and cleaner than intermingling with the
  // non-JSR normal-branch stuff occurring below.
  if (is_jsr) {
    // Compute return address as bci in Otos_i.
    __ ld(Rscratch1, in_bytes(Method::const_offset()), R19_method);
    __ addi(Rscratch2, R14_bcp, -in_bytes(ConstMethod::codes_offset()) + (is_wide ? 5 : 3));
    __ subf(R17_tos, Rscratch1, Rscratch2);

    // Bump bcp to target of JSR.
    __ add(R14_bcp, Rdisp, R14_bcp);
    // Push returnAddress for "ret" on stack.
    __ push_ptr(R17_tos);
    // And away we go!
    __ dispatch_next(vtos);
    return;
  }

  // --------------------------------------------------------------------------
  // Normal (non-jsr) branch handling

  const bool increment_invocation_counter_for_backward_branches = UseCompiler && UseLoopCounter;
  if (increment_invocation_counter_for_backward_branches) {
    //__ unimplemented("branch invocation counter");

    Label Lforward;
    __ add(R14_bcp, Rdisp, R14_bcp); // Add to bc addr.

    // Check branch direction.
    __ cmpdi(CCR0, Rdisp, 0);
    __ bgt(CCR0, Lforward);

    __ get_method_counters(R19_method, R4_counters, Lforward);

    if (TieredCompilation) {
      Label Lno_mdo, Loverflow;
      const int increment = InvocationCounter::count_increment;
      const int mask = ((1 << Tier0BackedgeNotifyFreqLog) - 1) << InvocationCounter::count_shift;
      if (ProfileInterpreter) {
        Register Rmdo = Rscratch1;

        // If no method data exists, go to profile_continue.
        __ ld(Rmdo, in_bytes(Method::method_data_offset()), R19_method);
        __ cmpdi(CCR0, Rmdo, 0);
        __ beq(CCR0, Lno_mdo);

        // Increment backedge counter in the MDO.
        const int mdo_bc_offs = in_bytes(MethodData::backedge_counter_offset()) + in_bytes(InvocationCounter::counter_offset());
        __ lwz(Rscratch2, mdo_bc_offs, Rmdo);
        __ load_const_optimized(Rscratch3, mask, R0);
        __ addi(Rscratch2, Rscratch2, increment);
        __ stw(Rscratch2, mdo_bc_offs, Rmdo);
        __ and_(Rscratch3, Rscratch2, Rscratch3);
        __ bne(CCR0, Lforward);
        __ b(Loverflow);
      }

      // If there's no MDO, increment counter in method.
      const int mo_bc_offs = in_bytes(MethodCounters::backedge_counter_offset()) + in_bytes(InvocationCounter::counter_offset());
      __ bind(Lno_mdo);
      __ lwz(Rscratch2, mo_bc_offs, R4_counters);
      __ load_const_optimized(Rscratch3, mask, R0);
      __ addi(Rscratch2, Rscratch2, increment);
      __ stw(Rscratch2, mo_bc_offs, R4_counters);
      __ and_(Rscratch3, Rscratch2, Rscratch3);
      __ bne(CCR0, Lforward);

      __ bind(Loverflow);

      // Notify point for loop, pass branch bytecode.
      __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::frequency_counter_overflow), R14_bcp, true);

      // Was an OSR adapter generated?
      // R3_RET = osr nmethod
      __ cmpdi(CCR0, R3_RET, 0);
      __ beq(CCR0, Lforward);

      // Has the nmethod been invalidated already?
      __ lwz(R0, nmethod::entry_bci_offset(), R3_RET);
      __ cmpwi(CCR0, R0, InvalidOSREntryBci);
      __ beq(CCR0, Lforward);

      // Migrate the interpreter frame off of the stack.
      // We can use all registers because we will not return to interpreter from this point.

      // Save nmethod.
      const Register osr_nmethod = R31;
      __ mr(osr_nmethod, R3_RET);
      __ set_top_ijava_frame_at_SP_as_last_Java_frame(R1_SP, R11_scratch1);
      __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::OSR_migration_begin), R16_thread);
      __ reset_last_Java_frame();
      // OSR buffer is in ARG1.

      // Remove the interpreter frame.
      __ merge_frames(/*top_frame_sp*/ R21_sender_SP, /*return_pc*/ R0, R11_scratch1, R12_scratch2);

      // Jump to the osr code.
      __ ld(R11_scratch1, nmethod::osr_entry_point_offset(), osr_nmethod);
      __ mtlr(R0);
      __ mtctr(R11_scratch1);
      __ bctr();

    } else {

      const Register invoke_ctr = Rscratch1;
      // Update Backedge branch separately from invocations.
      __ increment_backedge_counter(R4_counters, invoke_ctr, Rscratch2, Rscratch3);

      if (ProfileInterpreter) {
        __ test_invocation_counter_for_mdp(invoke_ctr, Rscratch2, Lforward);
        if (UseOnStackReplacement) {
          __ test_backedge_count_for_osr(bumped_count, R14_bcp, Rscratch2);
        }
      } else {
        if (UseOnStackReplacement) {
          __ test_backedge_count_for_osr(invoke_ctr, R14_bcp, Rscratch2);
        }
      }
    }

    __ bind(Lforward);

  } else {
    // Bump bytecode pointer by displacement (take the branch).
    __ add(R14_bcp, Rdisp, R14_bcp); // Add to bc addr.
  }
  // Continue with bytecode @ target.
  // %%%%% Like Intel, could speed things up by moving bytecode fetch to code above,
  // %%%%% and changing dispatch_next to dispatch_only.
  __ dispatch_next(vtos);
}

// Helper function for if_cmp* methods below.
// Factored out common compare and branch code.
void TemplateTable::if_cmp_common(Register Rfirst, Register Rsecond, Register Rscratch1, Register Rscratch2, Condition cc, bool is_jint, bool cmp0) {
  Label Lnot_taken;
  // Note: The condition code we get is the condition under which we
  // *fall through*! So we have to invert the CC here.

  if (is_jint) {
    if (cmp0) {
      __ cmpwi(CCR0, Rfirst, 0);
    } else {
      __ cmpw(CCR0, Rfirst, Rsecond);
    }
  } else {
    if (cmp0) {
      __ cmpdi(CCR0, Rfirst, 0);
    } else {
      __ cmpd(CCR0, Rfirst, Rsecond);
    }
  }
  branch_conditional(CCR0, cc, Lnot_taken, /*invert*/ true);

  // Condition is false => Jump!
  branch(false, false);

  // Condition is not true => Continue.
  __ align(32, 12);
  __ bind(Lnot_taken);
  __ profile_not_taken_branch(Rscratch1, Rscratch2);
}

// Compare integer values with zero and fall through if CC holds, branch away otherwise.
void TemplateTable::if_0cmp(Condition cc) {
  transition(itos, vtos);

  if_cmp_common(R17_tos, noreg, R11_scratch1, R12_scratch2, cc, true, true);
}

// Compare integer values and fall through if CC holds, branch away otherwise.
1781 //
1782 // Interface:
1783 // - Rfirst: First operand (older stack value)
1784 // - tos: Second operand (younger stack value)
1785 void TemplateTable::if_icmp(Condition cc) {
1786 transition(itos, vtos);
1787
1788 const Register Rfirst = R0,
1789 Rsecond = R17_tos;
1790
1791 __ pop_i(Rfirst);
1792 if_cmp_common(Rfirst, Rsecond, R11_scratch1, R12_scratch2, cc, true, false);
1793 }
1794
1795 void TemplateTable::if_nullcmp(Condition cc) {
1796 transition(atos, vtos);
1797
1798 if_cmp_common(R17_tos, noreg, R11_scratch1, R12_scratch2, cc, false, true);
1799 }
1800
1801 void TemplateTable::if_acmp(Condition cc) {
1802 transition(atos, vtos);
1803
1804 const Register Rfirst = R0,
1805 Rsecond = R17_tos;
1806
1807 __ pop_ptr(Rfirst);
1808 if_cmp_common(Rfirst, Rsecond, R11_scratch1, R12_scratch2, cc, false, false);
1809 }
1810
1811 void TemplateTable::ret() {
1812 locals_index(R11_scratch1);
1813 __ load_local_ptr(R17_tos, R11_scratch1, R11_scratch1);
1814
1815 __ profile_ret(vtos, R17_tos, R11_scratch1, R12_scratch2);
1816
1817 __ ld(R11_scratch1, in_bytes(Method::const_offset()), R19_method);
1818 __ add(R11_scratch1, R17_tos, R11_scratch1);
1819 __ addi(R14_bcp, R11_scratch1, in_bytes(ConstMethod::codes_offset()));
1820 __ dispatch_next(vtos);
1821 }
1822
1823 void TemplateTable::wide_ret() {
1824 transition(vtos, vtos);
1825
1826 const Register Rindex = R3_ARG1,
1827 Rscratch1 = R11_scratch1,
1828 Rscratch2 = R12_scratch2;
1829
1830 locals_index_wide(Rindex);
1831 __ load_local_ptr(R17_tos, R17_tos, Rindex);
1832 __ profile_ret(vtos, R17_tos, Rscratch1, Rscratch2);
1833 // Tos now contains the bci, compute the bcp from that.
1834 __ ld(Rscratch1, in_bytes(Method::const_offset()), R19_method);
1835 __ addi(Rscratch2, R17_tos, in_bytes(ConstMethod::codes_offset()));
1836 __ add(R14_bcp, Rscratch1, Rscratch2);
1837 __ dispatch_next(vtos);
1838 }
1839
1840 void TemplateTable::tableswitch() {
1841 transition(itos, vtos);
1842
1843 Label Ldispatch, Ldefault_case;
1844 Register Rlow_byte = R3_ARG1,
1845 Rindex = Rlow_byte,
1846 Rhigh_byte = R4_ARG2,
1847 Rdef_offset_addr = R5_ARG3, // Is going to contain address of default offset.
1848 Rscratch1 = R11_scratch1,
1849 Rscratch2 = R12_scratch2,
1850 Roffset = R6_ARG4;
1851
1852 // Align bcp.
1853 __ addi(Rdef_offset_addr, R14_bcp, BytesPerInt);
1854 __ clrrdi(Rdef_offset_addr, Rdef_offset_addr, log2_long((jlong)BytesPerInt));
1855
1856 // Load lo & hi.
1857 __ get_u4(Rlow_byte, Rdef_offset_addr, BytesPerInt, InterpreterMacroAssembler::Unsigned);
1858 __ get_u4(Rhigh_byte, Rdef_offset_addr, 2 * BytesPerInt, InterpreterMacroAssembler::Unsigned);
1859
1860 // Check for default case (=index outside [low,high]).
1861 __ cmpw(CCR0, R17_tos, Rlow_byte);
1862 __ cmpw(CCR1, R17_tos, Rhigh_byte);
1863 __ blt(CCR0, Ldefault_case);
1864 __ bgt(CCR1, Ldefault_case);
1865
1866 // Lookup dispatch offset.
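// Worked example (illustrative only, not generated code): the aligned data
// area starts with [default offset | low | high | case offsets...]. So for a
// hypothetical tableswitch with low = 3, high = 7 and a key of 5, the code
// below computes Rindex = 5 - 3 = 2 and loads the branch offset from
// Rdef_offset_addr + (3 + 2) * BytesPerInt; the 3 * BytesPerInt skips the
// default/low/high words that sit in front of the offset table.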
1867 __ sub(Rindex, R17_tos, Rlow_byte); 1868 __ extsw(Rindex, Rindex); 1869 __ profile_switch_case(Rindex, Rhigh_byte /* scratch */, Rscratch1, Rscratch2); 1870 __ sldi(Rindex, Rindex, LogBytesPerInt); 1871 __ addi(Rindex, Rindex, 3 * BytesPerInt); 1872 #if defined(VM_LITTLE_ENDIAN) 1873 __ lwbrx(Roffset, Rdef_offset_addr, Rindex); 1874 __ extsw(Roffset, Roffset); 1875 #else 1876 __ lwax(Roffset, Rdef_offset_addr, Rindex); 1877 #endif 1878 __ b(Ldispatch); 1879 1880 __ bind(Ldefault_case); 1881 __ profile_switch_default(Rhigh_byte, Rscratch1); 1882 __ get_u4(Roffset, Rdef_offset_addr, 0, InterpreterMacroAssembler::Signed); 1883 1884 __ bind(Ldispatch); 1885 1886 __ add(R14_bcp, Roffset, R14_bcp); 1887 __ dispatch_next(vtos); 1888 } 1889 1890 void TemplateTable::lookupswitch() { 1891 transition(itos, itos); 1892 __ stop("lookupswitch bytecode should have been rewritten"); 1893 } 1894 1895 // Table switch using linear search through cases. 1896 // Bytecode stream format: 1897 // Bytecode (1) | 4-byte padding | default offset (4) | count (4) | value/offset pair1 (8) | value/offset pair2 (8) | ... 1898 // Note: Everything is big-endian format here. 1899 void TemplateTable::fast_linearswitch() { 1900 transition(itos, vtos); 1901 1902 Label Lloop_entry, Lsearch_loop, Lcontinue_execution, Ldefault_case; 1903 Register Rcount = R3_ARG1, 1904 Rcurrent_pair = R4_ARG2, 1905 Rdef_offset_addr = R5_ARG3, // Is going to contain address of default offset. 1906 Roffset = R31, // Might need to survive C call. 1907 Rvalue = R12_scratch2, 1908 Rscratch = R11_scratch1, 1909 Rcmp_value = R17_tos; 1910 1911 // Align bcp. 1912 __ addi(Rdef_offset_addr, R14_bcp, BytesPerInt); 1913 __ clrrdi(Rdef_offset_addr, Rdef_offset_addr, log2_long((jlong)BytesPerInt)); 1914 1915 // Setup loop counter and limit. 1916 __ get_u4(Rcount, Rdef_offset_addr, BytesPerInt, InterpreterMacroAssembler::Unsigned); 1917 __ addi(Rcurrent_pair, Rdef_offset_addr, 2 * BytesPerInt); // Rcurrent_pair now points to first pair. 1918 1919 __ mtctr(Rcount); 1920 __ cmpwi(CCR0, Rcount, 0); 1921 __ bne(CCR0, Lloop_entry); 1922 1923 // Default case 1924 __ bind(Ldefault_case); 1925 __ get_u4(Roffset, Rdef_offset_addr, 0, InterpreterMacroAssembler::Signed); 1926 if (ProfileInterpreter) { 1927 __ profile_switch_default(Rdef_offset_addr, Rcount/* scratch */); 1928 } 1929 __ b(Lcontinue_execution); 1930 1931 // Next iteration 1932 __ bind(Lsearch_loop); 1933 __ bdz(Ldefault_case); 1934 __ addi(Rcurrent_pair, Rcurrent_pair, 2 * BytesPerInt); 1935 __ bind(Lloop_entry); 1936 __ get_u4(Rvalue, Rcurrent_pair, 0, InterpreterMacroAssembler::Unsigned); 1937 __ cmpw(CCR0, Rvalue, Rcmp_value); 1938 __ bne(CCR0, Lsearch_loop); 1939 1940 // Found, load offset. 1941 __ get_u4(Roffset, Rcurrent_pair, BytesPerInt, InterpreterMacroAssembler::Signed); 1942 // Calculate case index and profile 1943 __ mfctr(Rcurrent_pair); 1944 if (ProfileInterpreter) { 1945 __ sub(Rcurrent_pair, Rcount, Rcurrent_pair); 1946 __ profile_switch_case(Rcurrent_pair, Rcount /*scratch*/, Rdef_offset_addr/*scratch*/, Rscratch); 1947 } 1948 1949 __ bind(Lcontinue_execution); 1950 __ add(R14_bcp, Roffset, R14_bcp); 1951 __ dispatch_next(vtos); 1952 } 1953 1954 // Table switch using binary search (value/offset pairs are ordered). 1955 // Bytecode stream format: 1956 // Bytecode (1) | 4-byte padding | default offset (4) | count (4) | value/offset pair1 (8) | value/offset pair2 (8) | ... 1957 // Note: Everything is big-endian format here. 
So on little-endian machines we have to byte-reverse the offset, the count, and the compare value.
1958 void TemplateTable::fast_binaryswitch() {
1959
1960 transition(itos, vtos);
1961 // Implementation using the following core algorithm: (copied from Intel)
1962 //
1963 // int binary_search(int key, LookupswitchPair* array, int n) {
1964 // // Binary search according to "Methodik des Programmierens" by
1965 // // Edsger W. Dijkstra and W.H.J. Feijen, Addison Wesley Germany 1985.
1966 // int i = 0;
1967 // int j = n;
1968 // while (i+1 < j) {
1969 // // invariant P: 0 <= i < j <= n and (a[i] <= key < a[j] or Q)
1970 // // with Q: for all i: 0 <= i < n: key < a[i]
1971 // // where a stands for the array and assuming that the (nonexistent)
1972 // // element a[n] is infinitely big.
1973 // int h = (i + j) >> 1;
1974 // // i < h < j
1975 // if (key < array[h].fast_match()) {
1976 // j = h;
1977 // } else {
1978 // i = h;
1979 // }
1980 // }
1981 // // R: a[i] <= key < a[i+1] or Q
1982 // // (i.e., if key is within array, i is the correct index)
1983 // return i;
1984 // }
1985
1986 // register allocation
1987 const Register Rkey = R17_tos; // already set (tosca)
1988 const Register Rarray = R3_ARG1;
1989 const Register Ri = R4_ARG2;
1990 const Register Rj = R5_ARG3;
1991 const Register Rh = R6_ARG4;
1992 const Register Rscratch = R11_scratch1;
1993
1994 const int log_entry_size = 3;
1995 const int entry_size = 1 << log_entry_size;
1996
1997
1998
1999 // Find array start.
2000 __ addi(Rarray, R14_bcp, 3 * BytesPerInt);
2001 __ clrrdi(Rarray, Rarray, log2_long((jlong)BytesPerInt));
2002
2003 // initialize i & j
2004 __ li(Ri, 0);
2005 __ get_u4(Rj, Rarray, -BytesPerInt, InterpreterMacroAssembler::Unsigned);
2006
2007 // and start.
2008 Label entry;
2009 __ b(entry);
2010
2011 // binary search loop
2012 { Label loop;
2013 __ bind(loop);
2014 // int h = (i + j) >> 1;
2015 __ srdi(Rh, Rh, 1);
2016 // if (key < array[h].fast_match()) {
2017 // j = h;
2018 // } else {
2019 // i = h;
2020 // }
2021 __ sldi(Rscratch, Rh, log_entry_size);
2022 #if defined(VM_LITTLE_ENDIAN)
2023 __ lwbrx(Rscratch, Rscratch, Rarray);
2024 #else
2025 __ lwzx(Rscratch, Rscratch, Rarray);
2026 #endif
2027
2028 // if (key < current value)
2029 // Rj = h
2030 // else
2031 // Ri = h
2032 Label Lgreater;
2033 __ cmpw(CCR0, Rkey, Rscratch);
2034 __ bge(CCR0, Lgreater);
2035 __ mr(Rj, Rh);
2036 __ b(entry);
2037 __ bind(Lgreater);
2038 __ mr(Ri, Rh);
2039
2040 // while (i+1 < j)
2041 __ bind(entry);
2042 __ addi(Rscratch, Ri, 1);
2043 __ cmpw(CCR0, Rscratch, Rj);
2044 __ add(Rh, Ri, Rj); // h = i + j (shifted right to form h at the loop head).
2045
2046 __ blt(CCR0, loop);
2047 }
2048
2049 // End of binary search, result index is i (must check again!).
2050 Label default_case;
2051 Label continue_execution;
2052 if (ProfileInterpreter) {
2053 __ mr(Rh, Ri); // Save index i for profiling.
2054 }
2055 // Ri = address of the value/offset pair at index i; load its match value.
2056 __ sldi(Ri, Ri, log_entry_size);
2057 __ add(Ri, Ri, Rarray);
2058 __ get_u4(Rscratch, Ri, 0, InterpreterMacroAssembler::Unsigned);
2059
2060 Label Lfound;
2061 // (The branch offset lives at Ri + BytesPerInt.)
2062 __ cmpw(CCR0, Rkey, Rscratch);
2063 __ beq(CCR0, Lfound);
2064 // Entry not found -> j = default offset.
2065 __ get_u4(Rj, Rarray, -2 * BytesPerInt, InterpreterMacroAssembler::Unsigned);
2066 __ b(default_case);
2067
2068 __ bind(Lfound);
2069 // Entry found -> j = offset.
2070 __ profile_switch_case(Rh, Rj, Rscratch, Rkey);
2071 __ get_u4(Rj, Ri, BytesPerInt, InterpreterMacroAssembler::Unsigned);
2072
2073 if (ProfileInterpreter) {
2074 __ b(continue_execution);
2075 }
2076
2077 __ bind(default_case); // fall through (if not profiling)
2078 __ profile_switch_default(Ri, Rscratch);
2079
2080 __ bind(continue_execution);
2081
2082 __ extsw(Rj, Rj);
2083 __ add(R14_bcp, Rj, R14_bcp);
2084 __ dispatch_next(vtos);
2085 }
2086
2087 void TemplateTable::_return(TosState state) {
2088 transition(state, state);
2089 assert(_desc->calls_vm(),
2090 "inconsistent calls_vm information"); // call in remove_activation
2091
2092 if (_desc->bytecode() == Bytecodes::_return_register_finalizer) {
2093
2094 Register Rscratch = R11_scratch1,
2095 Rklass = R12_scratch2,
2096 Rklass_flags = Rklass;
2097 Label Lskip_register_finalizer;
2098
2099 // Check if the method has the FINALIZER flag set and call into the VM to finalize in this case.
2100 assert(state == vtos, "only valid state");
2101 __ ld(R17_tos, 0, R18_locals);
2102
2103 // Load klass of this obj.
2104 __ load_klass(Rklass, R17_tos);
2105 __ lwz(Rklass_flags, in_bytes(Klass::access_flags_offset()), Rklass);
2106 __ testbitdi(CCR0, R0, Rklass_flags, exact_log2(JVM_ACC_HAS_FINALIZER));
2107 __ bfalse(CCR0, Lskip_register_finalizer);
2108
2109 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::register_finalizer), R17_tos /* obj */);
2110
2111 __ align(32, 12);
2112 __ bind(Lskip_register_finalizer);
2113 }
2114
2115 // Move the result value into the correct register and remove the memory stack frame.
2116 __ remove_activation(state, /* throw_monitor_exception */ true);
2117 // Restoration of lr done by remove_activation.
2118 switch (state) {
2119 // Narrow result if state is itos but result type is smaller.
2120 // Need to narrow in the return bytecode rather than in generate_return_entry
2121 // since compiled code callers expect the result to already be narrowed.
2122 case itos: __ narrow(R17_tos); /* fall through */
2123 case ltos:
2124 case btos:
2125 case ztos:
2126 case ctos:
2127 case stos:
2128 case atos: __ mr(R3_RET, R17_tos); break;
2129 case ftos:
2130 case dtos: __ fmr(F1_RET, F15_ftos); break;
2131 case vtos: // This might be a constructor. Final fields (and volatile fields on PPC64) need
2132 // to become visible before the reference to the object is stored anywhere.
2133 __ membar(Assembler::StoreStore); break;
2134 default : ShouldNotReachHere();
2135 }
2136 __ blr();
2137 }
2138
2139 // ============================================================================
2140 // Constant pool cache access
2141 //
2142 // Memory ordering:
2143 //
2144 // As done in the C++ interpreter, we load the fields
2145 // - _indices
2146 // - _f12_oop
2147 // with acquire semantics, because these are checked to see whether the cache is
2148 // already resolved. We don't want loads to float above this check.
2149 // See also comments in ConstantPoolCacheEntry::bytecode_1(),
2150 // ConstantPoolCacheEntry::bytecode_2() and ConstantPoolCacheEntry::f1().
2151
2152 // Call into the VM if the call site is not yet resolved.
2153 //
2154 // Input regs:
2155 // - None, all passed regs are outputs.
2156 //
2157 // Returns:
2158 // - Rcache: The const pool cache entry that contains the resolved result.
2159 // (Callers load f1/f2 from Rcache themselves; there is no separate result register.)
2160
2161 // Kills:
2162 // - Rscratch
2163 void TemplateTable::resolve_cache_and_index(int byte_no, Register Rcache, Register Rscratch, size_t index_size) {
2164
2165 __ get_cache_and_index_at_bcp(Rcache, 1, index_size);
2166 Label Lresolved, Ldone;
2167
2168 assert(byte_no == f1_byte || byte_no == f2_byte, "byte_no out of range");
2169 // We are resolved if the indices field contains the current bytecode.
2170 #if defined(VM_LITTLE_ENDIAN)
2171 __ lbz(Rscratch, in_bytes(ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::indices_offset()) + byte_no + 1, Rcache);
2172 #else
2173 __ lbz(Rscratch, in_bytes(ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::indices_offset()) + 7 - (byte_no + 1), Rcache);
2174 #endif
2175 // Acquire by cmp-br-isync (see below).
2176 __ cmpdi(CCR0, Rscratch, (int)bytecode());
2177 __ beq(CCR0, Lresolved);
2178
2179 address entry = NULL;
2180 switch (bytecode()) {
2181 case Bytecodes::_getstatic : // fall through
2182 case Bytecodes::_putstatic : // fall through
2183 case Bytecodes::_getfield : // fall through
2184 case Bytecodes::_putfield : entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_get_put); break;
2185 case Bytecodes::_invokevirtual : // fall through
2186 case Bytecodes::_invokespecial : // fall through
2187 case Bytecodes::_invokestatic : // fall through
2188 case Bytecodes::_invokeinterface: entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_invoke); break;
2189 case Bytecodes::_invokehandle : entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_invokehandle); break;
2190 case Bytecodes::_invokedynamic : entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_invokedynamic); break;
2191 default : ShouldNotReachHere(); break;
2192 }
2193 __ li(R4_ARG2, (int)bytecode());
2194 __ call_VM(noreg, entry, R4_ARG2, true);
2195
2196 // Update registers with resolved info.
2197 __ get_cache_and_index_at_bcp(Rcache, 1, index_size);
2198 __ b(Ldone);
2199
2200 __ bind(Lresolved);
2201 __ isync(); // Order load wrt. succeeding loads.
2202 __ bind(Ldone);
2203 }
2204
2205 // Load the constant pool cache entry at field accesses into registers.
2206 // The Rcache and Rindex registers must be set before the call.
2207 // Input:
2208 // - Rcache, Rindex
2209 // Output:
2210 // - Robj, Roffset, Rflags
2211 void TemplateTable::load_field_cp_cache_entry(Register Robj,
2212 Register Rcache,
2213 Register Rindex /* unused on PPC64 */,
2214 Register Roffset,
2215 Register Rflags,
2216 bool is_static = false) {
2217 assert_different_registers(Rcache, Rflags, Roffset);
2218 // assert(Rindex == noreg, "parameter not used on PPC64");
2219
2220 ByteSize cp_base_offset = ConstantPoolCache::base_offset();
2221 __ ld(Rflags, in_bytes(cp_base_offset) + in_bytes(ConstantPoolCacheEntry::flags_offset()), Rcache);
2222 __ ld(Roffset, in_bytes(cp_base_offset) + in_bytes(ConstantPoolCacheEntry::f2_offset()), Rcache);
2223 if (is_static) {
2224 __ ld(Robj, in_bytes(cp_base_offset) + in_bytes(ConstantPoolCacheEntry::f1_offset()), Rcache);
2225 __ ld(Robj, in_bytes(Klass::java_mirror_offset()), Robj);
2226 // Acquire not needed here. Following access has an address dependency on this value.
2227 }
2228 }
2229
2230 // Load the constant pool cache entry at invokes into registers.
2231 // Resolve if necessary.
2232
2233 // Input Registers:
2234 // - None, bcp is used, though
2235 //
2236 // Return registers:
2237 // - Rmethod (f1 field or f2 if invokevirtual)
2238 // - Ritable_index (f2 field)
2239 // - Rflags (flags field)
2240 //
2241 // Kills:
2242 // - R21
2243 //
2244 void TemplateTable::load_invoke_cp_cache_entry(int byte_no,
2245 Register Rmethod,
2246 Register Ritable_index,
2247 Register Rflags,
2248 bool is_invokevirtual,
2249 bool is_invokevfinal,
2250 bool is_invokedynamic) {
2251
2252 ByteSize cp_base_offset = ConstantPoolCache::base_offset();
2253 // Determine constant pool cache field offsets.
2254 assert(is_invokevirtual == (byte_no == f2_byte), "is_invokevirtual flag redundant");
2255 const int method_offset = in_bytes(cp_base_offset + (is_invokevirtual ? ConstantPoolCacheEntry::f2_offset() : ConstantPoolCacheEntry::f1_offset()));
2256 const int flags_offset = in_bytes(cp_base_offset + ConstantPoolCacheEntry::flags_offset());
2257 // Access constant pool cache fields.
2258 const int index_offset = in_bytes(cp_base_offset + ConstantPoolCacheEntry::f2_offset());
2259
2260 Register Rcache = R21_tmp1; // Note: same register as R21_sender_SP.
2261
2262 if (is_invokevfinal) {
2263 assert(Ritable_index == noreg, "register not used");
2264 // Already resolved.
2265 __ get_cache_and_index_at_bcp(Rcache, 1);
2266 } else {
2267 resolve_cache_and_index(byte_no, Rcache, R0, is_invokedynamic ? sizeof(u4) : sizeof(u2));
2268 }
2269
2270 __ ld(Rmethod, method_offset, Rcache);
2271 __ ld(Rflags, flags_offset, Rcache);
2272
2273 if (Ritable_index != noreg) {
2274 __ ld(Ritable_index, index_offset, Rcache);
2275 }
2276 }
2277
2278 // ============================================================================
2279 // Field access
2280
2281 // Volatile variables demand their effects be made known to all CPUs
2282 // in order. Store buffers on most chips allow reads & writes to
2283 // reorder; the JMM's ReadAfterWrite.java test fails in -Xint mode
2284 // without some kind of memory barrier (i.e., it's not sufficient that
2285 // the interpreter does not reorder volatile references, the hardware
2286 // also must not reorder them).
2287 //
2288 // According to the new Java Memory Model (JMM):
2289 // (1) All volatiles are serialized wrt each other.
ALSO reads &
2290 // writes act as acquire & release, so:
2291 // (2) A read cannot let unrelated NON-volatile memory refs that
2292 // happen after the read float up to before the read. It's OK for
2293 // non-volatile memory refs that happen before the volatile read to
2294 // float down below it.
2295 // (3) Similarly, a volatile write cannot let unrelated NON-volatile
2296 // memory refs that happen BEFORE the write float down to after the
2297 // write. It's OK for non-volatile memory refs that happen after the
2298 // volatile write to float up before it.
2299 //
2300 // We only put in barriers around volatile refs (they are expensive),
2301 // not _between_ memory refs (that would require us to track the
2302 // flavor of the previous memory refs). Requirements (2) and (3)
2303 // require some barriers before volatile stores and after volatile
2304 // loads. These nearly cover requirement (1) but miss the
2305 // volatile-store-volatile-load case. This final case is placed after
2306 // volatile-stores although it could just as well go before
2307 // volatile-loads.
2308
2309 // The cache and index registers are expected to be set before the call.
2310 // Correct values of the cache and index registers are preserved.
2311 // Kills:
2312 // Rcache (if has_tos)
2313 // Rscratch
2314 void TemplateTable::jvmti_post_field_access(Register Rcache, Register Rscratch, bool is_static, bool has_tos) {
2315
2316 assert_different_registers(Rcache, Rscratch);
2317
2318 if (JvmtiExport::can_post_field_access()) {
2319 ByteSize cp_base_offset = ConstantPoolCache::base_offset();
2320 Label Lno_field_access_post;
2321
2322 // Check if posting of field access events is enabled.
2323 int offs = __ load_const_optimized(Rscratch, JvmtiExport::get_field_access_count_addr(), R0, true);
2324 __ lwz(Rscratch, offs, Rscratch);
2325
2326 __ cmpwi(CCR0, Rscratch, 0);
2327 __ beq(CCR0, Lno_field_access_post);
2328
2329 // Post access enabled - do it!
2330 __ addi(Rcache, Rcache, in_bytes(cp_base_offset));
2331 if (is_static) {
2332 __ li(R17_tos, 0);
2333 } else {
2334 if (has_tos) {
2335 // The fast bytecode versions have obj ptr in register.
2336 // Thus, save the object pointer before call_VM() clobbers it:
2337 // put the object on tos where GC wants it.
2338 __ push_ptr(R17_tos);
2339 } else {
2340 // Load top of stack (do not pop the value off the stack).
2341 __ ld(R17_tos, Interpreter::expr_offset_in_bytes(0), R15_esp);
2342 }
2343 __ verify_oop(R17_tos);
2344 }
2345 // tos: object pointer or NULL if static
2346 // cache: cache entry pointer
2347 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_field_access), R17_tos, Rcache);
2348 if (!is_static && has_tos) {
2349 // Restore object pointer.
2350 __ pop_ptr(R17_tos);
2351 __ verify_oop(R17_tos);
2352 } else {
2353 // Cache is still needed to get class or obj.
2354 __ get_cache_and_index_at_bcp(Rcache, 1);
2355 }
2356
2357 __ align(32, 12);
2358 __ bind(Lno_field_access_post);
2359 }
2360 }
2361
2362 // kills R11_scratch1
2363 void TemplateTable::pop_and_check_object(Register Roop) {
2364 Register Rtmp = R11_scratch1;
2365
2366 assert_different_registers(Rtmp, Roop);
2367 __ pop_ptr(Roop);
2368 // For field access must check obj.
2369 __ null_check_throw(Roop, -1, Rtmp);
2370 __ verify_oop(Roop);
2371 }
2372
2373 // PPC64: implement volatile loads as fence-load-acquire.
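// Illustrative sketch of the resulting instruction shapes (not generated
// code; the leading sync on loads is only emitted into the taken path when
// support_IRIW_for_not_multiple_copy_atomic_cpu is set):
//   volatile load:  (sync;) load; twi; isync    // twi/isync pair = acquire
//   volatile store: lwsync; store (; sync)      // release before the store;
//                                               // trailing sync in the volatile
//                                               // path orders store->load
// This matches the comment above: barriers sit before volatile stores and
// after volatile loads, and the trailing sync covers the
// volatile-store-volatile-load case.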
2374 void TemplateTable::getfield_or_static(int byte_no, bool is_static) { 2375 transition(vtos, vtos); 2376 2377 Label Lacquire, Lisync; 2378 2379 const Register Rcache = R3_ARG1, 2380 Rclass_or_obj = R22_tmp2, 2381 Roffset = R23_tmp3, 2382 Rflags = R31, 2383 Rbtable = R5_ARG3, 2384 Rbc = R6_ARG4, 2385 Rscratch = R12_scratch2; 2386 2387 static address field_branch_table[number_of_states], 2388 static_branch_table[number_of_states]; 2389 2390 address* branch_table = is_static ? static_branch_table : field_branch_table; 2391 2392 // Get field offset. 2393 resolve_cache_and_index(byte_no, Rcache, Rscratch, sizeof(u2)); 2394 2395 // JVMTI support 2396 jvmti_post_field_access(Rcache, Rscratch, is_static, false); 2397 2398 // Load after possible GC. 2399 load_field_cp_cache_entry(Rclass_or_obj, Rcache, noreg, Roffset, Rflags, is_static); 2400 2401 // Load pointer to branch table. 2402 __ load_const_optimized(Rbtable, (address)branch_table, Rscratch); 2403 2404 // Get volatile flag. 2405 __ rldicl(Rscratch, Rflags, 64-ConstantPoolCacheEntry::is_volatile_shift, 63); // Extract volatile bit. 2406 // Note: sync is needed before volatile load on PPC64. 2407 2408 // Check field type. 2409 __ rldicl(Rflags, Rflags, 64-ConstantPoolCacheEntry::tos_state_shift, 64-ConstantPoolCacheEntry::tos_state_bits); 2410 2411 #ifdef ASSERT 2412 Label LFlagInvalid; 2413 __ cmpldi(CCR0, Rflags, number_of_states); 2414 __ bge(CCR0, LFlagInvalid); 2415 #endif 2416 2417 // Load from branch table and dispatch (volatile case: one instruction ahead). 2418 __ sldi(Rflags, Rflags, LogBytesPerWord); 2419 __ cmpwi(CCR6, Rscratch, 1); // Volatile? 2420 if (support_IRIW_for_not_multiple_copy_atomic_cpu) { 2421 __ sldi(Rscratch, Rscratch, exact_log2(BytesPerInstWord)); // Volatile ? size of 1 instruction : 0. 2422 } 2423 __ ldx(Rbtable, Rbtable, Rflags); 2424 2425 // Get the obj from stack. 2426 if (!is_static) { 2427 pop_and_check_object(Rclass_or_obj); // Kills R11_scratch1. 2428 } else { 2429 __ verify_oop(Rclass_or_obj); 2430 } 2431 2432 if (support_IRIW_for_not_multiple_copy_atomic_cpu) { 2433 __ subf(Rbtable, Rscratch, Rbtable); // Point to volatile/non-volatile entry point. 2434 } 2435 __ mtctr(Rbtable); 2436 __ bctr(); 2437 2438 #ifdef ASSERT 2439 __ bind(LFlagInvalid); 2440 __ stop("got invalid flag", 0x654); 2441 2442 // __ bind(Lvtos); 2443 address pc_before_fence = __ pc(); 2444 __ fence(); // Volatile entry point (one instruction before non-volatile_entry point). 2445 assert(__ pc() - pc_before_fence == (ptrdiff_t)BytesPerInstWord, "must be single instruction"); 2446 assert(branch_table[vtos] == 0, "can't compute twice"); 2447 branch_table[vtos] = __ pc(); // non-volatile_entry point 2448 __ stop("vtos unexpected", 0x655); 2449 #endif 2450 2451 __ align(32, 28, 28); // Align load. 2452 // __ bind(Ldtos); 2453 __ fence(); // Volatile entry point (one instruction before non-volatile_entry point). 2454 assert(branch_table[dtos] == 0, "can't compute twice"); 2455 branch_table[dtos] = __ pc(); // non-volatile_entry point 2456 __ lfdx(F15_ftos, Rclass_or_obj, Roffset); 2457 __ push(dtos); 2458 if (!is_static) patch_bytecode(Bytecodes::_fast_dgetfield, Rbc, Rscratch); 2459 { 2460 Label acquire_double; 2461 __ beq(CCR6, acquire_double); // Volatile? 2462 __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode())); 2463 2464 __ bind(acquire_double); 2465 __ fcmpu(CCR0, F15_ftos, F15_ftos); // Acquire by cmp-br-isync. 2466 __ beq_predict_taken(CCR0, Lisync); 2467 __ b(Lisync); // In case of NAN. 
2468 } 2469 2470 __ align(32, 28, 28); // Align load. 2471 // __ bind(Lftos); 2472 __ fence(); // Volatile entry point (one instruction before non-volatile_entry point). 2473 assert(branch_table[ftos] == 0, "can't compute twice"); 2474 branch_table[ftos] = __ pc(); // non-volatile_entry point 2475 __ lfsx(F15_ftos, Rclass_or_obj, Roffset); 2476 __ push(ftos); 2477 if (!is_static) { patch_bytecode(Bytecodes::_fast_fgetfield, Rbc, Rscratch); } 2478 { 2479 Label acquire_float; 2480 __ beq(CCR6, acquire_float); // Volatile? 2481 __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode())); 2482 2483 __ bind(acquire_float); 2484 __ fcmpu(CCR0, F15_ftos, F15_ftos); // Acquire by cmp-br-isync. 2485 __ beq_predict_taken(CCR0, Lisync); 2486 __ b(Lisync); // In case of NAN. 2487 } 2488 2489 __ align(32, 28, 28); // Align load. 2490 // __ bind(Litos); 2491 __ fence(); // Volatile entry point (one instruction before non-volatile_entry point). 2492 assert(branch_table[itos] == 0, "can't compute twice"); 2493 branch_table[itos] = __ pc(); // non-volatile_entry point 2494 __ lwax(R17_tos, Rclass_or_obj, Roffset); 2495 __ push(itos); 2496 if (!is_static) patch_bytecode(Bytecodes::_fast_igetfield, Rbc, Rscratch); 2497 __ beq(CCR6, Lacquire); // Volatile? 2498 __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode())); 2499 2500 __ align(32, 28, 28); // Align load. 2501 // __ bind(Lltos); 2502 __ fence(); // Volatile entry point (one instruction before non-volatile_entry point). 2503 assert(branch_table[ltos] == 0, "can't compute twice"); 2504 branch_table[ltos] = __ pc(); // non-volatile_entry point 2505 __ ldx(R17_tos, Rclass_or_obj, Roffset); 2506 __ push(ltos); 2507 if (!is_static) patch_bytecode(Bytecodes::_fast_lgetfield, Rbc, Rscratch); 2508 __ beq(CCR6, Lacquire); // Volatile? 2509 __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode())); 2510 2511 __ align(32, 28, 28); // Align load. 2512 // __ bind(Lbtos); 2513 __ fence(); // Volatile entry point (one instruction before non-volatile_entry point). 2514 assert(branch_table[btos] == 0, "can't compute twice"); 2515 branch_table[btos] = __ pc(); // non-volatile_entry point 2516 __ lbzx(R17_tos, Rclass_or_obj, Roffset); 2517 __ extsb(R17_tos, R17_tos); 2518 __ push(btos); 2519 if (!is_static) patch_bytecode(Bytecodes::_fast_bgetfield, Rbc, Rscratch); 2520 __ beq(CCR6, Lacquire); // Volatile? 2521 __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode())); 2522 2523 __ align(32, 28, 28); // Align load. 2524 // __ bind(Lztos); (same code as btos) 2525 __ fence(); // Volatile entry point (one instruction before non-volatile_entry point). 2526 assert(branch_table[ztos] == 0, "can't compute twice"); 2527 branch_table[ztos] = __ pc(); // non-volatile_entry point 2528 __ lbzx(R17_tos, Rclass_or_obj, Roffset); 2529 __ extsb(R17_tos, R17_tos); 2530 __ push(ztos); 2531 if (!is_static) { 2532 // use btos rewriting, no truncating to t/f bit is needed for getfield. 2533 patch_bytecode(Bytecodes::_fast_bgetfield, Rbc, Rscratch); 2534 } 2535 __ beq(CCR6, Lacquire); // Volatile? 2536 __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode())); 2537 2538 __ align(32, 28, 28); // Align load. 2539 // __ bind(Lctos); 2540 __ fence(); // Volatile entry point (one instruction before non-volatile_entry point). 
2541 assert(branch_table[ctos] == 0, "can't compute twice");
2542 branch_table[ctos] = __ pc(); // non-volatile_entry point
2543 __ lhzx(R17_tos, Rclass_or_obj, Roffset);
2544 __ push(ctos);
2545 if (!is_static) patch_bytecode(Bytecodes::_fast_cgetfield, Rbc, Rscratch);
2546 __ beq(CCR6, Lacquire); // Volatile?
2547 __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode()));
2548
2549 __ align(32, 28, 28); // Align load.
2550 // __ bind(Lstos);
2551 __ fence(); // Volatile entry point (one instruction before non-volatile_entry point).
2552 assert(branch_table[stos] == 0, "can't compute twice");
2553 branch_table[stos] = __ pc(); // non-volatile_entry point
2554 __ lhax(R17_tos, Rclass_or_obj, Roffset);
2555 __ push(stos);
2556 if (!is_static) patch_bytecode(Bytecodes::_fast_sgetfield, Rbc, Rscratch);
2557 __ beq(CCR6, Lacquire); // Volatile?
2558 __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode()));
2559
2560 __ align(32, 28, 28); // Align load.
2561 // __ bind(Latos);
2562 __ fence(); // Volatile entry point (one instruction before non-volatile_entry point).
2563 assert(branch_table[atos] == 0, "can't compute twice");
2564 branch_table[atos] = __ pc(); // non-volatile_entry point
2565 __ load_heap_oop(R17_tos, (RegisterOrConstant)Roffset, Rclass_or_obj);
2566 __ verify_oop(R17_tos);
2567 __ push(atos);
2568 //__ dcbt(R17_tos); // prefetch
2569 if (!is_static) patch_bytecode(Bytecodes::_fast_agetfield, Rbc, Rscratch);
2570 __ beq(CCR6, Lacquire); // Volatile?
2571 __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode()));
2572
2573 __ align(32, 12);
2574 __ bind(Lacquire);
2575 __ twi_0(R17_tos);
2576 __ bind(Lisync);
2577 __ isync(); // acquire
2578
2579 #ifdef ASSERT
2580 for (int i = 0; i < number_of_states; ++i) {
2581 assert(branch_table[i], "get initialization");
2582 //tty->print_cr("get: %s_branch_table[%d] = 0x%llx (opcode 0x%llx)",
2583 // is_static ? "static" : "field", i, branch_table[i], *((unsigned int*)branch_table[i]));
2584 }
2585 #endif
2586 }
2587
2588 void TemplateTable::getfield(int byte_no) {
2589 getfield_or_static(byte_no, false);
2590 }
2591
2592 void TemplateTable::getstatic(int byte_no) {
2593 getfield_or_static(byte_no, true);
2594 }
2595
2596 // The cache and index registers are expected to be set before the call.
2597 // The function may destroy various registers, just not the cache and index registers.
2598 void TemplateTable::jvmti_post_field_mod(Register Rcache, Register Rscratch, bool is_static) {
2599
2600 assert_different_registers(Rcache, Rscratch, R6_ARG4);
2601
2602 if (JvmtiExport::can_post_field_modification()) {
2603 Label Lno_field_mod_post;
2604
2605 // Check if posting of field modification events is enabled.
2606 int offs = __ load_const_optimized(Rscratch, JvmtiExport::get_field_modification_count_addr(), R0, true);
2607 __ lwz(Rscratch, offs, Rscratch);
2608
2609 __ cmpwi(CCR0, Rscratch, 0);
2610 __ beq(CCR0, Lno_field_mod_post);
2611
2612 // Do the post.
2613 ByteSize cp_base_offset = ConstantPoolCache::base_offset();
2614 const Register Robj = Rscratch;
2615
2616 __ addi(Rcache, Rcache, in_bytes(cp_base_offset));
2617 if (is_static) {
2618 // Life is simple. Null out the object pointer.
2619 __ li(Robj, 0);
2620 } else {
2621 // In case of the fast versions, value lives in registers => put it back on tos.
2622 int offs = Interpreter::expr_offset_in_bytes(0); 2623 Register base = R15_esp; 2624 switch(bytecode()) { 2625 case Bytecodes::_fast_aputfield: __ push_ptr(); offs+= Interpreter::stackElementSize; break; 2626 case Bytecodes::_fast_iputfield: // Fall through 2627 case Bytecodes::_fast_bputfield: // Fall through 2628 case Bytecodes::_fast_zputfield: // Fall through 2629 case Bytecodes::_fast_cputfield: // Fall through 2630 case Bytecodes::_fast_sputfield: __ push_i(); offs+= Interpreter::stackElementSize; break; 2631 case Bytecodes::_fast_lputfield: __ push_l(); offs+=2*Interpreter::stackElementSize; break; 2632 case Bytecodes::_fast_fputfield: __ push_f(); offs+= Interpreter::stackElementSize; break; 2633 case Bytecodes::_fast_dputfield: __ push_d(); offs+=2*Interpreter::stackElementSize; break; 2634 default: { 2635 offs = 0; 2636 base = Robj; 2637 const Register Rflags = Robj; 2638 Label is_one_slot; 2639 // Life is harder. The stack holds the value on top, followed by the 2640 // object. We don't know the size of the value, though; it could be 2641 // one or two words depending on its type. As a result, we must find 2642 // the type to determine where the object is. 2643 __ ld(Rflags, in_bytes(ConstantPoolCacheEntry::flags_offset()), Rcache); // Big Endian 2644 __ rldicl(Rflags, Rflags, 64-ConstantPoolCacheEntry::tos_state_shift, 64-ConstantPoolCacheEntry::tos_state_bits); 2645 2646 __ cmpwi(CCR0, Rflags, ltos); 2647 __ cmpwi(CCR1, Rflags, dtos); 2648 __ addi(base, R15_esp, Interpreter::expr_offset_in_bytes(1)); 2649 __ crnor(/*CR0 eq*/2, /*CR1 eq*/4+2, /*CR0 eq*/2); 2650 __ beq(CCR0, is_one_slot); 2651 __ addi(base, R15_esp, Interpreter::expr_offset_in_bytes(2)); 2652 __ bind(is_one_slot); 2653 break; 2654 } 2655 } 2656 __ ld(Robj, offs, base); 2657 __ verify_oop(Robj); 2658 } 2659 2660 __ addi(R6_ARG4, R15_esp, Interpreter::expr_offset_in_bytes(0)); 2661 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_field_modification), Robj, Rcache, R6_ARG4); 2662 __ get_cache_and_index_at_bcp(Rcache, 1); 2663 2664 // In case of the fast versions, value lives in registers => put it back on tos. 2665 switch(bytecode()) { 2666 case Bytecodes::_fast_aputfield: __ pop_ptr(); break; 2667 case Bytecodes::_fast_iputfield: // Fall through 2668 case Bytecodes::_fast_bputfield: // Fall through 2669 case Bytecodes::_fast_zputfield: // Fall through 2670 case Bytecodes::_fast_cputfield: // Fall through 2671 case Bytecodes::_fast_sputfield: __ pop_i(); break; 2672 case Bytecodes::_fast_lputfield: __ pop_l(); break; 2673 case Bytecodes::_fast_fputfield: __ pop_f(); break; 2674 case Bytecodes::_fast_dputfield: __ pop_d(); break; 2675 default: break; // Nothin' to do. 2676 } 2677 2678 __ align(32, 12); 2679 __ bind(Lno_field_mod_post); 2680 } 2681 } 2682 2683 // PPC64: implement volatile stores as release-store (return bytecode contains an additional release). 2684 void TemplateTable::putfield_or_static(int byte_no, bool is_static) { 2685 Label Lvolatile; 2686 2687 const Register Rcache = R5_ARG3, // Do not use ARG1/2 (causes trouble in jvmti_post_field_mod). 2688 Rclass_or_obj = R31, // Needs to survive C call. 2689 Roffset = R22_tmp2, // Needs to survive C call. 2690 Rflags = R3_ARG1, 2691 Rbtable = R4_ARG2, 2692 Rscratch = R11_scratch1, 2693 Rscratch2 = R12_scratch2, 2694 Rscratch3 = R6_ARG4, 2695 Rbc = Rscratch3; 2696 const ConditionRegister CR_is_vol = CCR2; // Non-volatile condition register (survives runtime call in do_oop_store). 
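// Dispatch sketch (illustrative only, not generated code): branch_table[tos_state]
// records the non-volatile entry point for each value type, and the matching
// volatile entry point is the single release() immediately preceding it.
// The dispatch below therefore computes, in effect,
//   target = branch_table[tos_state] - (is_volatile ? BytesPerInstWord : 0)
// and jumps there through the count register (mtctr/bctr).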
2697 2698 static address field_branch_table[number_of_states], 2699 static_branch_table[number_of_states]; 2700 2701 address* branch_table = is_static ? static_branch_table : field_branch_table; 2702 2703 // Stack (grows up): 2704 // value 2705 // obj 2706 2707 // Load the field offset. 2708 resolve_cache_and_index(byte_no, Rcache, Rscratch, sizeof(u2)); 2709 jvmti_post_field_mod(Rcache, Rscratch, is_static); 2710 load_field_cp_cache_entry(Rclass_or_obj, Rcache, noreg, Roffset, Rflags, is_static); 2711 2712 // Load pointer to branch table. 2713 __ load_const_optimized(Rbtable, (address)branch_table, Rscratch); 2714 2715 // Get volatile flag. 2716 __ rldicl(Rscratch, Rflags, 64-ConstantPoolCacheEntry::is_volatile_shift, 63); // Extract volatile bit. 2717 2718 // Check the field type. 2719 __ rldicl(Rflags, Rflags, 64-ConstantPoolCacheEntry::tos_state_shift, 64-ConstantPoolCacheEntry::tos_state_bits); 2720 2721 #ifdef ASSERT 2722 Label LFlagInvalid; 2723 __ cmpldi(CCR0, Rflags, number_of_states); 2724 __ bge(CCR0, LFlagInvalid); 2725 #endif 2726 2727 // Load from branch table and dispatch (volatile case: one instruction ahead). 2728 __ sldi(Rflags, Rflags, LogBytesPerWord); 2729 if (!support_IRIW_for_not_multiple_copy_atomic_cpu) { __ cmpwi(CR_is_vol, Rscratch, 1); } // Volatile? 2730 __ sldi(Rscratch, Rscratch, exact_log2(BytesPerInstWord)); // Volatile? size of instruction 1 : 0. 2731 __ ldx(Rbtable, Rbtable, Rflags); 2732 2733 __ subf(Rbtable, Rscratch, Rbtable); // Point to volatile/non-volatile entry point. 2734 __ mtctr(Rbtable); 2735 __ bctr(); 2736 2737 #ifdef ASSERT 2738 __ bind(LFlagInvalid); 2739 __ stop("got invalid flag", 0x656); 2740 2741 // __ bind(Lvtos); 2742 address pc_before_release = __ pc(); 2743 __ release(); // Volatile entry point (one instruction before non-volatile_entry point). 2744 assert(__ pc() - pc_before_release == (ptrdiff_t)BytesPerInstWord, "must be single instruction"); 2745 assert(branch_table[vtos] == 0, "can't compute twice"); 2746 branch_table[vtos] = __ pc(); // non-volatile_entry point 2747 __ stop("vtos unexpected", 0x657); 2748 #endif 2749 2750 __ align(32, 28, 28); // Align pop. 2751 // __ bind(Ldtos); 2752 __ release(); // Volatile entry point (one instruction before non-volatile_entry point). 2753 assert(branch_table[dtos] == 0, "can't compute twice"); 2754 branch_table[dtos] = __ pc(); // non-volatile_entry point 2755 __ pop(dtos); 2756 if (!is_static) { pop_and_check_object(Rclass_or_obj); } // Kills R11_scratch1. 2757 __ stfdx(F15_ftos, Rclass_or_obj, Roffset); 2758 if (!is_static) { patch_bytecode(Bytecodes::_fast_dputfield, Rbc, Rscratch, true, byte_no); } 2759 if (!support_IRIW_for_not_multiple_copy_atomic_cpu) { 2760 __ beq(CR_is_vol, Lvolatile); // Volatile? 2761 } 2762 __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode())); 2763 2764 __ align(32, 28, 28); // Align pop. 2765 // __ bind(Lftos); 2766 __ release(); // Volatile entry point (one instruction before non-volatile_entry point). 2767 assert(branch_table[ftos] == 0, "can't compute twice"); 2768 branch_table[ftos] = __ pc(); // non-volatile_entry point 2769 __ pop(ftos); 2770 if (!is_static) { pop_and_check_object(Rclass_or_obj); } // Kills R11_scratch1. 2771 __ stfsx(F15_ftos, Rclass_or_obj, Roffset); 2772 if (!is_static) { patch_bytecode(Bytecodes::_fast_fputfield, Rbc, Rscratch, true, byte_no); } 2773 if (!support_IRIW_for_not_multiple_copy_atomic_cpu) { 2774 __ beq(CR_is_vol, Lvolatile); // Volatile? 
2775 }
2776 __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode()));
2777
2778 __ align(32, 28, 28); // Align pop.
2779 // __ bind(Litos);
2780 __ release(); // Volatile entry point (one instruction before non-volatile_entry point).
2781 assert(branch_table[itos] == 0, "can't compute twice");
2782 branch_table[itos] = __ pc(); // non-volatile_entry point
2783 __ pop(itos);
2784 if (!is_static) { pop_and_check_object(Rclass_or_obj); } // Kills R11_scratch1.
2785 __ stwx(R17_tos, Rclass_or_obj, Roffset);
2786 if (!is_static) { patch_bytecode(Bytecodes::_fast_iputfield, Rbc, Rscratch, true, byte_no); }
2787 if (!support_IRIW_for_not_multiple_copy_atomic_cpu) {
2788 __ beq(CR_is_vol, Lvolatile); // Volatile?
2789 }
2790 __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode()));
2791
2792 __ align(32, 28, 28); // Align pop.
2793 // __ bind(Lltos);
2794 __ release(); // Volatile entry point (one instruction before non-volatile_entry point).
2795 assert(branch_table[ltos] == 0, "can't compute twice");
2796 branch_table[ltos] = __ pc(); // non-volatile_entry point
2797 __ pop(ltos);
2798 if (!is_static) { pop_and_check_object(Rclass_or_obj); } // Kills R11_scratch1.
2799 __ stdx(R17_tos, Rclass_or_obj, Roffset);
2800 if (!is_static) { patch_bytecode(Bytecodes::_fast_lputfield, Rbc, Rscratch, true, byte_no); }
2801 if (!support_IRIW_for_not_multiple_copy_atomic_cpu) {
2802 __ beq(CR_is_vol, Lvolatile); // Volatile?
2803 }
2804 __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode()));
2805
2806 __ align(32, 28, 28); // Align pop.
2807 // __ bind(Lbtos);
2808 __ release(); // Volatile entry point (one instruction before non-volatile_entry point).
2809 assert(branch_table[btos] == 0, "can't compute twice");
2810 branch_table[btos] = __ pc(); // non-volatile_entry point
2811 __ pop(btos);
2812 if (!is_static) { pop_and_check_object(Rclass_or_obj); } // Kills R11_scratch1.
2813 __ stbx(R17_tos, Rclass_or_obj, Roffset);
2814 if (!is_static) { patch_bytecode(Bytecodes::_fast_bputfield, Rbc, Rscratch, true, byte_no); }
2815 if (!support_IRIW_for_not_multiple_copy_atomic_cpu) {
2816 __ beq(CR_is_vol, Lvolatile); // Volatile?
2817 }
2818 __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode()));
2819
2820 __ align(32, 28, 28); // Align pop.
2821 // __ bind(Lztos);
2822 __ release(); // Volatile entry point (one instruction before non-volatile_entry point).
2823 assert(branch_table[ztos] == 0, "can't compute twice");
2824 branch_table[ztos] = __ pc(); // non-volatile_entry point
2825 __ pop(ztos);
2826 if (!is_static) { pop_and_check_object(Rclass_or_obj); } // Kills R11_scratch1.
2827 __ andi(R17_tos, R17_tos, 0x1);
2828 __ stbx(R17_tos, Rclass_or_obj, Roffset);
2829 if (!is_static) { patch_bytecode(Bytecodes::_fast_zputfield, Rbc, Rscratch, true, byte_no); }
2830 if (!support_IRIW_for_not_multiple_copy_atomic_cpu) {
2831 __ beq(CR_is_vol, Lvolatile); // Volatile?
2832 }
2833 __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode()));
2834
2835 __ align(32, 28, 28); // Align pop.
2836 // __ bind(Lctos);
2837 __ release(); // Volatile entry point (one instruction before non-volatile_entry point).
2838 assert(branch_table[ctos] == 0, "can't compute twice");
2839 branch_table[ctos] = __ pc(); // non-volatile_entry point
2840 __ pop(ctos);
2841 if (!is_static) { pop_and_check_object(Rclass_or_obj); } // Kills R11_scratch1.
2842 __ sthx(R17_tos, Rclass_or_obj, Roffset); 2843 if (!is_static) { patch_bytecode(Bytecodes::_fast_cputfield, Rbc, Rscratch, true, byte_no); } 2844 if (!support_IRIW_for_not_multiple_copy_atomic_cpu) { 2845 __ beq(CR_is_vol, Lvolatile); // Volatile? 2846 } 2847 __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode())); 2848 2849 __ align(32, 28, 28); // Align pop. 2850 // __ bind(Lstos); 2851 __ release(); // Volatile entry point (one instruction before non-volatile_entry point). 2852 assert(branch_table[stos] == 0, "can't compute twice"); 2853 branch_table[stos] = __ pc(); // non-volatile_entry point 2854 __ pop(stos); 2855 if (!is_static) { pop_and_check_object(Rclass_or_obj); } // Kills R11_scratch1. 2856 __ sthx(R17_tos, Rclass_or_obj, Roffset); 2857 if (!is_static) { patch_bytecode(Bytecodes::_fast_sputfield, Rbc, Rscratch, true, byte_no); } 2858 if (!support_IRIW_for_not_multiple_copy_atomic_cpu) { 2859 __ beq(CR_is_vol, Lvolatile); // Volatile? 2860 } 2861 __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode())); 2862 2863 __ align(32, 28, 28); // Align pop. 2864 // __ bind(Latos); 2865 __ release(); // Volatile entry point (one instruction before non-volatile_entry point). 2866 assert(branch_table[atos] == 0, "can't compute twice"); 2867 branch_table[atos] = __ pc(); // non-volatile_entry point 2868 __ pop(atos); 2869 if (!is_static) { pop_and_check_object(Rclass_or_obj); } // kills R11_scratch1 2870 do_oop_store(_masm, Rclass_or_obj, Roffset, R17_tos, Rscratch, Rscratch2, Rscratch3, _bs->kind(), false /* precise */, true /* check null */); 2871 if (!is_static) { patch_bytecode(Bytecodes::_fast_aputfield, Rbc, Rscratch, true, byte_no); } 2872 if (!support_IRIW_for_not_multiple_copy_atomic_cpu) { 2873 __ beq(CR_is_vol, Lvolatile); // Volatile? 2874 __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode())); 2875 2876 __ align(32, 12); 2877 __ bind(Lvolatile); 2878 __ fence(); 2879 } 2880 // fallthru: __ b(Lexit); 2881 2882 #ifdef ASSERT 2883 for (int i = 0; i<number_of_states; ++i) { 2884 assert(branch_table[i], "put initialization"); 2885 //tty->print_cr("put: %s_branch_table[%d] = 0x%llx (opcode 0x%llx)", 2886 // is_static ? "static" : "field", i, branch_table[i], *((unsigned int*)branch_table[i])); 2887 } 2888 #endif 2889 } 2890 2891 void TemplateTable::putfield(int byte_no) { 2892 putfield_or_static(byte_no, false); 2893 } 2894 2895 void TemplateTable::putstatic(int byte_no) { 2896 putfield_or_static(byte_no, true); 2897 } 2898 2899 // See SPARC. On PPC64, we have a different jvmti_post_field_mod which does the job. 2900 void TemplateTable::jvmti_post_fast_field_mod() { 2901 __ should_not_reach_here(); 2902 } 2903 2904 void TemplateTable::fast_storefield(TosState state) { 2905 transition(state, vtos); 2906 2907 const Register Rcache = R5_ARG3, // Do not use ARG1/2 (causes trouble in jvmti_post_field_mod). 2908 Rclass_or_obj = R31, // Needs to survive C call. 2909 Roffset = R22_tmp2, // Needs to survive C call. 2910 Rflags = R3_ARG1, 2911 Rscratch = R11_scratch1, 2912 Rscratch2 = R12_scratch2, 2913 Rscratch3 = R4_ARG2; 2914 const ConditionRegister CR_is_vol = CCR2; // Non-volatile condition register (survives runtime call in do_oop_store). 2915 2916 // Constant pool already resolved => Load flags and offset of field. 2917 __ get_cache_and_index_at_bcp(Rcache, 1); 2918 jvmti_post_field_mod(Rcache, Rscratch, false /* not static */); 2919 load_field_cp_cache_entry(noreg, Rcache, noreg, Roffset, Rflags, false); 2920 2921 // Get the obj and the final store addr. 
2922 pop_and_check_object(Rclass_or_obj); // Kills R11_scratch1. 2923 2924 // Get volatile flag. 2925 __ rldicl_(Rscratch, Rflags, 64-ConstantPoolCacheEntry::is_volatile_shift, 63); // Extract volatile bit. 2926 if (!support_IRIW_for_not_multiple_copy_atomic_cpu) { __ cmpdi(CR_is_vol, Rscratch, 1); } 2927 { 2928 Label LnotVolatile; 2929 __ beq(CCR0, LnotVolatile); 2930 __ release(); 2931 __ align(32, 12); 2932 __ bind(LnotVolatile); 2933 } 2934 2935 // Do the store and fencing. 2936 switch(bytecode()) { 2937 case Bytecodes::_fast_aputfield: 2938 // Store into the field. 2939 do_oop_store(_masm, Rclass_or_obj, Roffset, R17_tos, Rscratch, Rscratch2, Rscratch3, _bs->kind(), false /* precise */, true /* check null */); 2940 break; 2941 2942 case Bytecodes::_fast_iputfield: 2943 __ stwx(R17_tos, Rclass_or_obj, Roffset); 2944 break; 2945 2946 case Bytecodes::_fast_lputfield: 2947 __ stdx(R17_tos, Rclass_or_obj, Roffset); 2948 break; 2949 2950 case Bytecodes::_fast_zputfield: 2951 __ andi(R17_tos, R17_tos, 0x1); // boolean is true if LSB is 1 2952 // fall through to bputfield 2953 case Bytecodes::_fast_bputfield: 2954 __ stbx(R17_tos, Rclass_or_obj, Roffset); 2955 break; 2956 2957 case Bytecodes::_fast_cputfield: 2958 case Bytecodes::_fast_sputfield: 2959 __ sthx(R17_tos, Rclass_or_obj, Roffset); 2960 break; 2961 2962 case Bytecodes::_fast_fputfield: 2963 __ stfsx(F15_ftos, Rclass_or_obj, Roffset); 2964 break; 2965 2966 case Bytecodes::_fast_dputfield: 2967 __ stfdx(F15_ftos, Rclass_or_obj, Roffset); 2968 break; 2969 2970 default: ShouldNotReachHere(); 2971 } 2972 2973 if (!support_IRIW_for_not_multiple_copy_atomic_cpu) { 2974 Label LVolatile; 2975 __ beq(CR_is_vol, LVolatile); 2976 __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode())); 2977 2978 __ align(32, 12); 2979 __ bind(LVolatile); 2980 __ fence(); 2981 } 2982 } 2983 2984 void TemplateTable::fast_accessfield(TosState state) { 2985 transition(atos, state); 2986 2987 Label LisVolatile; 2988 ByteSize cp_base_offset = ConstantPoolCache::base_offset(); 2989 2990 const Register Rcache = R3_ARG1, 2991 Rclass_or_obj = R17_tos, 2992 Roffset = R22_tmp2, 2993 Rflags = R23_tmp3, 2994 Rscratch = R12_scratch2; 2995 2996 // Constant pool already resolved. Get the field offset. 2997 __ get_cache_and_index_at_bcp(Rcache, 1); 2998 load_field_cp_cache_entry(noreg, Rcache, noreg, Roffset, Rflags, false); 2999 3000 // JVMTI support 3001 jvmti_post_field_access(Rcache, Rscratch, false, true); 3002 3003 // Get the load address. 3004 __ null_check_throw(Rclass_or_obj, -1, Rscratch); 3005 3006 // Get volatile flag. 3007 __ rldicl_(Rscratch, Rflags, 64-ConstantPoolCacheEntry::is_volatile_shift, 63); // Extract volatile bit. 
3008 __ bne(CCR0, LisVolatile); 3009 3010 switch(bytecode()) { 3011 case Bytecodes::_fast_agetfield: 3012 { 3013 __ load_heap_oop(R17_tos, (RegisterOrConstant)Roffset, Rclass_or_obj); 3014 __ verify_oop(R17_tos); 3015 __ dispatch_epilog(state, Bytecodes::length_for(bytecode())); 3016 3017 __ bind(LisVolatile); 3018 if (support_IRIW_for_not_multiple_copy_atomic_cpu) { __ fence(); } 3019 __ load_heap_oop(R17_tos, (RegisterOrConstant)Roffset, Rclass_or_obj); 3020 __ verify_oop(R17_tos); 3021 __ twi_0(R17_tos); 3022 __ isync(); 3023 break; 3024 } 3025 case Bytecodes::_fast_igetfield: 3026 { 3027 __ lwax(R17_tos, Rclass_or_obj, Roffset); 3028 __ dispatch_epilog(state, Bytecodes::length_for(bytecode())); 3029 3030 __ bind(LisVolatile); 3031 if (support_IRIW_for_not_multiple_copy_atomic_cpu) { __ fence(); } 3032 __ lwax(R17_tos, Rclass_or_obj, Roffset); 3033 __ twi_0(R17_tos); 3034 __ isync(); 3035 break; 3036 } 3037 case Bytecodes::_fast_lgetfield: 3038 { 3039 __ ldx(R17_tos, Rclass_or_obj, Roffset); 3040 __ dispatch_epilog(state, Bytecodes::length_for(bytecode())); 3041 3042 __ bind(LisVolatile); 3043 if (support_IRIW_for_not_multiple_copy_atomic_cpu) { __ fence(); } 3044 __ ldx(R17_tos, Rclass_or_obj, Roffset); 3045 __ twi_0(R17_tos); 3046 __ isync(); 3047 break; 3048 } 3049 case Bytecodes::_fast_bgetfield: 3050 { 3051 __ lbzx(R17_tos, Rclass_or_obj, Roffset); 3052 __ extsb(R17_tos, R17_tos); 3053 __ dispatch_epilog(state, Bytecodes::length_for(bytecode())); 3054 3055 __ bind(LisVolatile); 3056 if (support_IRIW_for_not_multiple_copy_atomic_cpu) { __ fence(); } 3057 __ lbzx(R17_tos, Rclass_or_obj, Roffset); 3058 __ twi_0(R17_tos); 3059 __ extsb(R17_tos, R17_tos); 3060 __ isync(); 3061 break; 3062 } 3063 case Bytecodes::_fast_cgetfield: 3064 { 3065 __ lhzx(R17_tos, Rclass_or_obj, Roffset); 3066 __ dispatch_epilog(state, Bytecodes::length_for(bytecode())); 3067 3068 __ bind(LisVolatile); 3069 if (support_IRIW_for_not_multiple_copy_atomic_cpu) { __ fence(); } 3070 __ lhzx(R17_tos, Rclass_or_obj, Roffset); 3071 __ twi_0(R17_tos); 3072 __ isync(); 3073 break; 3074 } 3075 case Bytecodes::_fast_sgetfield: 3076 { 3077 __ lhax(R17_tos, Rclass_or_obj, Roffset); 3078 __ dispatch_epilog(state, Bytecodes::length_for(bytecode())); 3079 3080 __ bind(LisVolatile); 3081 if (support_IRIW_for_not_multiple_copy_atomic_cpu) { __ fence(); } 3082 __ lhax(R17_tos, Rclass_or_obj, Roffset); 3083 __ twi_0(R17_tos); 3084 __ isync(); 3085 break; 3086 } 3087 case Bytecodes::_fast_fgetfield: 3088 { 3089 __ lfsx(F15_ftos, Rclass_or_obj, Roffset); 3090 __ dispatch_epilog(state, Bytecodes::length_for(bytecode())); 3091 3092 __ bind(LisVolatile); 3093 Label Ldummy; 3094 if (support_IRIW_for_not_multiple_copy_atomic_cpu) { __ fence(); } 3095 __ lfsx(F15_ftos, Rclass_or_obj, Roffset); 3096 __ fcmpu(CCR0, F15_ftos, F15_ftos); // Acquire by cmp-br-isync. 3097 __ bne_predict_not_taken(CCR0, Ldummy); 3098 __ bind(Ldummy); 3099 __ isync(); 3100 break; 3101 } 3102 case Bytecodes::_fast_dgetfield: 3103 { 3104 __ lfdx(F15_ftos, Rclass_or_obj, Roffset); 3105 __ dispatch_epilog(state, Bytecodes::length_for(bytecode())); 3106 3107 __ bind(LisVolatile); 3108 Label Ldummy; 3109 if (support_IRIW_for_not_multiple_copy_atomic_cpu) { __ fence(); } 3110 __ lfdx(F15_ftos, Rclass_or_obj, Roffset); 3111 __ fcmpu(CCR0, F15_ftos, F15_ftos); // Acquire by cmp-br-isync. 
3112 __ bne_predict_not_taken(CCR0, Ldummy); 3113 __ bind(Ldummy); 3114 __ isync(); 3115 break; 3116 } 3117 default: ShouldNotReachHere(); 3118 } 3119 } 3120 3121 void TemplateTable::fast_xaccess(TosState state) { 3122 transition(vtos, state); 3123 3124 Label LisVolatile; 3125 ByteSize cp_base_offset = ConstantPoolCache::base_offset(); 3126 const Register Rcache = R3_ARG1, 3127 Rclass_or_obj = R17_tos, 3128 Roffset = R22_tmp2, 3129 Rflags = R23_tmp3, 3130 Rscratch = R12_scratch2; 3131 3132 __ ld(Rclass_or_obj, 0, R18_locals); 3133 3134 // Constant pool already resolved. Get the field offset. 3135 __ get_cache_and_index_at_bcp(Rcache, 2); 3136 load_field_cp_cache_entry(noreg, Rcache, noreg, Roffset, Rflags, false); 3137 3138 // JVMTI support not needed, since we switch back to single bytecode as soon as debugger attaches. 3139 3140 // Needed to report exception at the correct bcp. 3141 __ addi(R14_bcp, R14_bcp, 1); 3142 3143 // Get the load address. 3144 __ null_check_throw(Rclass_or_obj, -1, Rscratch); 3145 3146 // Get volatile flag. 3147 __ rldicl_(Rscratch, Rflags, 64-ConstantPoolCacheEntry::is_volatile_shift, 63); // Extract volatile bit. 3148 __ bne(CCR0, LisVolatile); 3149 3150 switch(state) { 3151 case atos: 3152 { 3153 __ load_heap_oop(R17_tos, (RegisterOrConstant)Roffset, Rclass_or_obj); 3154 __ verify_oop(R17_tos); 3155 __ dispatch_epilog(state, Bytecodes::length_for(bytecode()) - 1); // Undo bcp increment. 3156 3157 __ bind(LisVolatile); 3158 if (support_IRIW_for_not_multiple_copy_atomic_cpu) { __ fence(); } 3159 __ load_heap_oop(R17_tos, (RegisterOrConstant)Roffset, Rclass_or_obj); 3160 __ verify_oop(R17_tos); 3161 __ twi_0(R17_tos); 3162 __ isync(); 3163 break; 3164 } 3165 case itos: 3166 { 3167 __ lwax(R17_tos, Rclass_or_obj, Roffset); 3168 __ dispatch_epilog(state, Bytecodes::length_for(bytecode()) - 1); // Undo bcp increment. 3169 3170 __ bind(LisVolatile); 3171 if (support_IRIW_for_not_multiple_copy_atomic_cpu) { __ fence(); } 3172 __ lwax(R17_tos, Rclass_or_obj, Roffset); 3173 __ twi_0(R17_tos); 3174 __ isync(); 3175 break; 3176 } 3177 case ftos: 3178 { 3179 __ lfsx(F15_ftos, Rclass_or_obj, Roffset); 3180 __ dispatch_epilog(state, Bytecodes::length_for(bytecode()) - 1); // Undo bcp increment. 3181 3182 __ bind(LisVolatile); 3183 Label Ldummy; 3184 if (support_IRIW_for_not_multiple_copy_atomic_cpu) { __ fence(); } 3185 __ lfsx(F15_ftos, Rclass_or_obj, Roffset); 3186 __ fcmpu(CCR0, F15_ftos, F15_ftos); // Acquire by cmp-br-isync. 3187 __ bne_predict_not_taken(CCR0, Ldummy); 3188 __ bind(Ldummy); 3189 __ isync(); 3190 break; 3191 } 3192 default: ShouldNotReachHere(); 3193 } 3194 __ addi(R14_bcp, R14_bcp, -1); 3195 } 3196 3197 // ============================================================================ 3198 // Calls 3199 3200 // Common code for invoke 3201 // 3202 // Input: 3203 // - byte_no 3204 // 3205 // Output: 3206 // - Rmethod: The method to invoke next. 3207 // - Rret_addr: The return address to return to. 3208 // - Rindex: MethodType (invokehandle) or CallSite obj (invokedynamic) 3209 // - Rrecv: Cache for "this" pointer, might be noreg if static call. 3210 // - Rflags: Method flags from const pool cache. 3211 // 3212 // Kills: 3213 // - Rscratch1 3214 // 3215 void TemplateTable::prepare_invoke(int byte_no, 3216 Register Rmethod, // linked method (or i-klass) 3217 Register Rret_addr,// return address 3218 Register Rindex, // itable index, MethodType, etc. 3219 Register Rrecv, // If caller wants to see it. 3220 Register Rflags, // If caller wants to test it. 
3221 Register Rscratch
3222 ) {
3223 // Determine flags.
3224 const Bytecodes::Code code = bytecode();
3225 const bool is_invokeinterface = code == Bytecodes::_invokeinterface;
3226 const bool is_invokedynamic = code == Bytecodes::_invokedynamic;
3227 const bool is_invokehandle = code == Bytecodes::_invokehandle;
3228 const bool is_invokevirtual = code == Bytecodes::_invokevirtual;
3229 const bool is_invokespecial = code == Bytecodes::_invokespecial;
3230 const bool load_receiver = (Rrecv != noreg);
3231 assert(load_receiver == (code != Bytecodes::_invokestatic && code != Bytecodes::_invokedynamic), "");
3232
3233 assert_different_registers(Rmethod, Rindex, Rflags, Rscratch);
3234 assert_different_registers(Rmethod, Rrecv, Rflags, Rscratch);
3235 assert_different_registers(Rret_addr, Rscratch);
3236
3237 load_invoke_cp_cache_entry(byte_no, Rmethod, Rindex, Rflags, is_invokevirtual, false, is_invokedynamic);
3238
3239 // Saving of SP done in call_from_interpreter.
3240
3241 // Maybe push "appendix" to arguments.
3242 if (is_invokedynamic || is_invokehandle) {
3243 Label Ldone;
3244 __ rldicl_(R0, Rflags, 64-ConstantPoolCacheEntry::has_appendix_shift, 63);
3245 __ beq(CCR0, Ldone);
3246 // Push "appendix" (MethodType, CallSite, etc.).
3247 // This must be done before we get the receiver,
3248 // since the parameter_size includes it.
3249 __ load_resolved_reference_at_index(Rscratch, Rindex);
3250 __ verify_oop(Rscratch);
3251 __ push_ptr(Rscratch);
3252 __ bind(Ldone);
3253 }
3254
3255 // Load receiver if needed (after appendix is pushed so parameter size is correct).
3256 if (load_receiver) {
3257 const Register Rparam_count = Rscratch;
3258 __ andi(Rparam_count, Rflags, ConstantPoolCacheEntry::parameter_size_mask);
3259 __ load_receiver(Rparam_count, Rrecv);
3260 __ verify_oop(Rrecv);
3261 }
3262
3263 // Get return address.
3264 {
3265 Register Rtable_addr = Rscratch;
3266 Register Rret_type = Rret_addr;
3267 address table_addr = (address) Interpreter::invoke_return_entry_table_for(code);
3268
3269 // Get return type. It's coded into the upper 4 bits of the lower half of the 64 bit value.
3270 __ rldicl(Rret_type, Rflags, 64-ConstantPoolCacheEntry::tos_state_shift, 64-ConstantPoolCacheEntry::tos_state_bits);
3271 __ load_dispatch_table(Rtable_addr, (address*)table_addr);
3272 __ sldi(Rret_type, Rret_type, LogBytesPerWord);
3273 // Get return address.
3274 __ ldx(Rret_addr, Rtable_addr, Rret_type);
3275 }
3276 }
3277
3278 // Helper for virtual calls. Load target out of vtable and jump off!
3279 // Kills all passed registers.
3280 void TemplateTable::generate_vtable_call(Register Rrecv_klass, Register Rindex, Register Rret, Register Rtemp) {
3281
3282 assert_different_registers(Rrecv_klass, Rtemp, Rret);
3283 const Register Rtarget_method = Rindex;
3284
3285 // Get target method & entry point.
3286 const int base = InstanceKlass::vtable_start_offset() * wordSize;
3287 // Calc vtable addr: scale the vtable index by the vtable entry size (8 bytes).
3288 __ sldi(Rindex, Rindex, exact_log2(vtableEntry::size() * wordSize));
3289 // Load target.
3290 __ addi(Rrecv_klass, Rrecv_klass, base + vtableEntry::method_offset_in_bytes());
3291 __ ldx(Rtarget_method, Rindex, Rrecv_klass);
3292 // Argument and return type profiling.
3293 __ profile_arguments_type(Rtarget_method, Rrecv_klass /* scratch1 */, Rtemp /* scratch2 */, true);
3294 __ call_from_interpreter(Rtarget_method, Rret, Rrecv_klass /* scratch1 */, Rtemp /* scratch2 */);
3295 }
3296
3297 // Virtual or final call.
// Final calls are rewritten on the fly to run through "_fast_invokevfinal" next time.
void TemplateTable::invokevirtual(int byte_no) {
  transition(vtos, vtos);

  Register Rtable_addr = R11_scratch1,
           Rret_type = R12_scratch2,
           Rret_addr = R5_ARG3,
           Rflags = R22_tmp2,             // Should survive C call.
           Rrecv = R3_ARG1,
           Rrecv_klass = Rrecv,
           Rvtableindex_or_method = R31,  // Should survive C call.
           Rnum_params = R4_ARG2,
           Rnew_bc = R6_ARG4;

  Label LnotFinal;

  load_invoke_cp_cache_entry(byte_no, Rvtableindex_or_method, noreg, Rflags, /*virtual*/ true, false, false);

  __ testbitdi(CCR0, R0, Rflags, ConstantPoolCacheEntry::is_vfinal_shift);
  __ bfalse(CCR0, LnotFinal);

  patch_bytecode(Bytecodes::_fast_invokevfinal, Rnew_bc, R12_scratch2);
  invokevfinal_helper(Rvtableindex_or_method, Rflags, R11_scratch1, R12_scratch2);

  __ align(32, 12);
  __ bind(LnotFinal);
  // Load "this" pointer (receiver).
  __ rldicl(Rnum_params, Rflags, 64, 48);
  __ load_receiver(Rnum_params, Rrecv);
  __ verify_oop(Rrecv);

  // Get return type. It's coded into the upper 4 bits of the lower half of the 64 bit value.
  __ rldicl(Rret_type, Rflags, 64-ConstantPoolCacheEntry::tos_state_shift, 64-ConstantPoolCacheEntry::tos_state_bits);
  __ load_dispatch_table(Rtable_addr, Interpreter::invoke_return_entry_table());
  __ sldi(Rret_type, Rret_type, LogBytesPerWord);
  __ ldx(Rret_addr, Rret_type, Rtable_addr);
  __ null_check_throw(Rrecv, oopDesc::klass_offset_in_bytes(), R11_scratch1);
  __ load_klass(Rrecv_klass, Rrecv);
  __ verify_klass_ptr(Rrecv_klass);
  __ profile_virtual_call(Rrecv_klass, R11_scratch1, R12_scratch2, false);

  generate_vtable_call(Rrecv_klass, Rvtableindex_or_method, Rret_addr, R11_scratch1);
}

void TemplateTable::fast_invokevfinal(int byte_no) {
  transition(vtos, vtos);

  assert(byte_no == f2_byte, "use this argument");
  Register Rflags = R22_tmp2,
           Rmethod = R31;
  load_invoke_cp_cache_entry(byte_no, Rmethod, noreg, Rflags, /*virtual*/ true, /*is_invokevfinal*/ true, false);
  invokevfinal_helper(Rmethod, Rflags, R11_scratch1, R12_scratch2);
}

void TemplateTable::invokevfinal_helper(Register Rmethod, Register Rflags, Register Rscratch1, Register Rscratch2) {

  assert_different_registers(Rmethod, Rflags, Rscratch1, Rscratch2);

  // Load receiver from stack slot.
  Register Rrecv = Rscratch2;
  Register Rnum_params = Rrecv;

  __ ld(Rnum_params, in_bytes(Method::const_offset()), Rmethod);
  __ lhz(Rnum_params /* number of params */, in_bytes(ConstMethod::size_of_parameters_offset()), Rnum_params);

  // Get return address.
  Register Rtable_addr = Rscratch1,
           Rret_addr = Rflags,
           Rret_type = Rret_addr;
  // Get return type. It's coded into the upper 4 bits of the lower half of the 64 bit value.
  __ rldicl(Rret_type, Rflags, 64-ConstantPoolCacheEntry::tos_state_shift, 64-ConstantPoolCacheEntry::tos_state_bits);
  __ load_dispatch_table(Rtable_addr, Interpreter::invoke_return_entry_table());
  __ sldi(Rret_type, Rret_type, LogBytesPerWord);
  __ ldx(Rret_addr, Rret_type, Rtable_addr);

  // Load receiver and receiver NULL check.
  __ load_receiver(Rnum_params, Rrecv);
  __ null_check_throw(Rrecv, -1, Rscratch1);

  __ profile_final_call(Rrecv, Rscratch1);
  // Argument and return type profiling.
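  // (A short note, ours: this records the observed argument and return types
  //  in the method's profiling data so that the JIT compilers can use them later.)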
  __ profile_arguments_type(Rmethod, Rscratch1, Rscratch2, true);

  // Do the call.
  __ call_from_interpreter(Rmethod, Rret_addr, Rscratch1, Rscratch2);
}

void TemplateTable::invokespecial(int byte_no) {
  assert(byte_no == f1_byte, "use this argument");
  transition(vtos, vtos);

  Register Rtable_addr = R3_ARG1,
           Rret_addr = R4_ARG2,
           Rflags = R5_ARG3,
           Rreceiver = R6_ARG4,
           Rmethod = R31;

  prepare_invoke(byte_no, Rmethod, Rret_addr, noreg, Rreceiver, Rflags, R11_scratch1);

  // Receiver NULL check.
  __ null_check_throw(Rreceiver, -1, R11_scratch1);

  __ profile_call(R11_scratch1, R12_scratch2);
  // Argument and return type profiling.
  __ profile_arguments_type(Rmethod, R11_scratch1, R12_scratch2, false);
  __ call_from_interpreter(Rmethod, Rret_addr, R11_scratch1, R12_scratch2);
}

void TemplateTable::invokestatic(int byte_no) {
  assert(byte_no == f1_byte, "use this argument");
  transition(vtos, vtos);

  Register Rtable_addr = R3_ARG1,
           Rret_addr = R4_ARG2,
           Rflags = R5_ARG3;

  prepare_invoke(byte_no, R19_method, Rret_addr, noreg, noreg, Rflags, R11_scratch1);

  __ profile_call(R11_scratch1, R12_scratch2);
  // Argument and return type profiling.
  __ profile_arguments_type(R19_method, R11_scratch1, R12_scratch2, false);
  __ call_from_interpreter(R19_method, Rret_addr, R11_scratch1, R12_scratch2);
}

void TemplateTable::invokeinterface_object_method(Register Rrecv_klass,
                                                  Register Rret,
                                                  Register Rflags,
                                                  Register Rindex,
                                                  Register Rtemp1,
                                                  Register Rtemp2) {

  assert_different_registers(Rindex, Rret, Rrecv_klass, Rflags, Rtemp1, Rtemp2);
  Label LnotFinal;

  // Check for vfinal.
  __ testbitdi(CCR0, R0, Rflags, ConstantPoolCacheEntry::is_vfinal_shift);
  __ bfalse(CCR0, LnotFinal);

  Register Rscratch = Rflags; // Rflags is dead now.

  // Final call case.
  __ profile_final_call(Rtemp1, Rscratch);
  // Argument and return type profiling.
  __ profile_arguments_type(Rindex, Rscratch, Rrecv_klass /* scratch */, true);
  // Do the final call - the index (f2) contains the method.
  __ call_from_interpreter(Rindex, Rret, Rscratch, Rrecv_klass /* scratch */);

  // Non-final call case.
  __ bind(LnotFinal);
  __ profile_virtual_call(Rrecv_klass, Rtemp1, Rscratch, false);
  generate_vtable_call(Rrecv_klass, Rindex, Rret, Rscratch);
}

void TemplateTable::invokeinterface(int byte_no) {
  assert(byte_no == f1_byte, "use this argument");
  transition(vtos, vtos);

  const Register Rscratch1 = R11_scratch1,
                 Rscratch2 = R12_scratch2,
                 Rscratch3 = R9_ARG7,
                 Rscratch4 = R10_ARG8,
                 Rtable_addr = Rscratch2,
                 Rinterface_klass = R5_ARG3,
                 Rret_type = R8_ARG6,
                 Rret_addr = Rret_type,
                 Rindex = R6_ARG4,
                 Rreceiver = R4_ARG2,
                 Rrecv_klass = Rreceiver,
                 Rflags = R7_ARG5;

  prepare_invoke(byte_no, Rinterface_klass, Rret_addr, Rindex, Rreceiver, Rflags, Rscratch1);

  // Get receiver klass.
  __ null_check_throw(Rreceiver, oopDesc::klass_offset_in_bytes(), Rscratch3);
  __ load_klass(Rrecv_klass, Rreceiver);

  // Check corner case object method.
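  // (invokeinterface invoking a virtual method of java.lang.Object is
  //  dispatched as a virtual call; see the comment at LobjectMethod below.)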
  Label LobjectMethod;

  __ testbitdi(CCR0, R0, Rflags, ConstantPoolCacheEntry::is_forced_virtual_shift);
  __ btrue(CCR0, LobjectMethod);

  // Fallthrough: The normal invokeinterface case.
  __ profile_virtual_call(Rrecv_klass, Rscratch1, Rscratch2, false);

  // Find entry point to call.
  Label Lthrow_icc, Lthrow_ame;
  // Result will be returned in Rindex.
  __ mr(Rscratch4, Rrecv_klass);
  __ mr(Rscratch3, Rindex);
  __ lookup_interface_method(Rrecv_klass, Rinterface_klass, Rindex, Rindex, Rscratch1, Rscratch2, Lthrow_icc);

  __ cmpdi(CCR0, Rindex, 0);
  __ beq(CCR0, Lthrow_ame);
  // Found entry. Jump off!
  // Argument and return type profiling.
  __ profile_arguments_type(Rindex, Rscratch1, Rscratch2, true);
  __ call_from_interpreter(Rindex, Rret_addr, Rscratch1, Rscratch2);

  // Vtable entry was NULL => Throw abstract method error.
  __ bind(Lthrow_ame);
  __ mr(Rrecv_klass, Rscratch4);
  __ mr(Rindex, Rscratch3);
  call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_AbstractMethodError));

  // Interface was not found => Throw incompatible class change error.
  __ bind(Lthrow_icc);
  __ mr(Rrecv_klass, Rscratch4);
  call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_IncompatibleClassChangeError));

  __ should_not_reach_here();

  // Special case of invokeinterface called for virtual method of
  // java.lang.Object. See ConstantPoolCacheEntry::set_method() for details:
  // The invokeinterface was rewritten to an invokevirtual, hence we have
  // to handle this corner case. This code isn't produced by javac, but could
  // be produced by another compliant java compiler.
  __ bind(LobjectMethod);
  invokeinterface_object_method(Rrecv_klass, Rret_addr, Rflags, Rindex, Rscratch1, Rscratch2);
}

void TemplateTable::invokedynamic(int byte_no) {
  transition(vtos, vtos);

  const Register Rret_addr = R3_ARG1,
                 Rflags = R4_ARG2,
                 Rmethod = R22_tmp2,
                 Rscratch1 = R11_scratch1,
                 Rscratch2 = R12_scratch2;

  if (!EnableInvokeDynamic) {
    // We should not encounter this bytecode if !EnableInvokeDynamic.
    // The verifier will stop it. However, if we get past the verifier,
    // this will stop the thread in a reasonable way, without crashing the JVM.
    __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_IncompatibleClassChangeError));
    // The call_VM checks for exception, so we should never return here.
    __ should_not_reach_here();
    return;
  }

  prepare_invoke(byte_no, Rmethod, Rret_addr, Rscratch1, noreg, Rflags, Rscratch2);

  // Profile this call.
  __ profile_call(Rscratch1, Rscratch2);

  // Off we go. With the new method handles, we don't jump to a method handle
  // entry any more. Instead, we pushed an "appendix" in prepare_invoke, which happens
  // to be the callsite object the bootstrap method returned. This is passed to a
  // "link" method which does the dispatch (most likely just grabs the MH stored
  // inside the callsite and does an invokehandle).
  // Argument and return type profiling.
  __ profile_arguments_type(Rmethod, Rscratch1, Rscratch2, false);
  __ call_from_interpreter(Rmethod, Rret_addr, Rscratch1 /* scratch1 */, Rscratch2 /* scratch2 */);
}

void TemplateTable::invokehandle(int byte_no) {
  transition(vtos, vtos);

  const Register Rret_addr = R3_ARG1,
                 Rflags = R4_ARG2,
                 Rrecv = R5_ARG3,
                 Rmethod = R22_tmp2,
                 Rscratch1 = R11_scratch1,
                 Rscratch2 = R12_scratch2;

  if (!EnableInvokeDynamic) {
    // Rewriter does not generate this bytecode.
    __ should_not_reach_here();
    return;
  }

  prepare_invoke(byte_no, Rmethod, Rret_addr, Rscratch1, Rrecv, Rflags, Rscratch2);
  __ verify_method_ptr(Rmethod);
  __ null_check_throw(Rrecv, -1, Rscratch2);

  __ profile_final_call(Rrecv, Rscratch1);

  // Still no call from handle => We call the method handle interpreter here.
  // Argument and return type profiling.
  __ profile_arguments_type(Rmethod, Rscratch1, Rscratch2, true);
  __ call_from_interpreter(Rmethod, Rret_addr, Rscratch1 /* scratch1 */, Rscratch2 /* scratch2 */);
}

// =============================================================================
// Allocation

// Puts allocated obj ref onto the expression stack.
void TemplateTable::_new() {
  transition(vtos, atos);

  Label Lslow_case,
        Ldone,
        Linitialize_header,
        Lallocate_shared,
        Linitialize_object;  // Including clearing the fields.

  const Register RallocatedObject = R17_tos,
                 RinstanceKlass   = R9_ARG7,
                 Rscratch         = R11_scratch1,
                 Roffset          = R8_ARG6,
                 Rinstance_size   = Roffset,
                 Rcpool           = R4_ARG2,
                 Rtags            = R3_ARG1,
                 Rindex           = R5_ARG3;

  const bool allow_shared_alloc = Universe::heap()->supports_inline_contig_alloc() && !CMSIncrementalMode;

  // --------------------------------------------------------------------------
  // Check if fast case is possible.

  // Load pointers to const pool and const pool's tags array.
  __ get_cpool_and_tags(Rcpool, Rtags);
  // Load index of constant pool entry.
  __ get_2_byte_integer_at_bcp(1, Rindex, InterpreterMacroAssembler::Unsigned);

  if (UseTLAB) {
    // Make sure the class we're about to instantiate has been resolved.
    // This is done before loading the instanceKlass to be consistent with the order
    // in which the constant pool is updated (see ConstantPoolCache::klass_at_put).
    __ addi(Rtags, Rtags, Array<u1>::base_offset_in_bytes());
    __ lbzx(Rtags, Rindex, Rtags);

    __ cmpdi(CCR0, Rtags, JVM_CONSTANT_Class);
    __ bne(CCR0, Lslow_case);

    // Get instanceKlass (load from Rcpool + sizeof(ConstantPool) + Rindex*BytesPerWord).
    __ sldi(Roffset, Rindex, LogBytesPerWord);
    __ addi(Rscratch, Rcpool, sizeof(ConstantPool));
    __ isync(); // Order load of instance Klass wrt. tags.
    __ ldx(RinstanceKlass, Roffset, Rscratch);

    // Make sure klass is fully initialized and get instance_size.
    __ lbz(Rscratch, in_bytes(InstanceKlass::init_state_offset()), RinstanceKlass);
    __ lwz(Rinstance_size, in_bytes(Klass::layout_helper_offset()), RinstanceKlass);

    __ cmpdi(CCR1, Rscratch, InstanceKlass::fully_initialized);
    // Make sure klass has no finalizer and is not abstract, an interface, or java/lang/Class.
    __ andi_(R0, Rinstance_size, Klass::_lh_instance_slow_path_bit); // slow path bit equals 0?
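    // Sketch of the condition-register logic below (boolean notation ours):
    //   CR0.eq := !(CR0.eq /* slow path bit is 0 */ && CR1.eq /* fully initialized */)
    // so the single beq(CCR0, Lslow_case) takes the slow path whenever the
    // slow path bit is set or the klass is not fully initialized.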

    __ crnand(/*CR0 eq*/2, /*CR1 eq*/4+2, /*CR0 eq*/2); // slow path bit set or not fully initialized?
    __ beq(CCR0, Lslow_case);

    // --------------------------------------------------------------------------
    // Fast case:
    // Allocate the instance.
    // 1) Try to allocate in the TLAB.
    // 2) If that fails and the TLAB is not full enough to discard, allocate in the shared Eden.
    // 3) If the above fails (or is not applicable), go to a slow case (creates a new TLAB, etc.).

    Register RoldTopValue = RallocatedObject; // Object will be allocated here if it fits.
    Register RnewTopValue = R6_ARG4;
    Register RendValue    = R7_ARG5;

    // Check if we can allocate in the TLAB.
    __ ld(RoldTopValue, in_bytes(JavaThread::tlab_top_offset()), R16_thread);
    __ ld(RendValue, in_bytes(JavaThread::tlab_end_offset()), R16_thread);

    __ add(RnewTopValue, Rinstance_size, RoldTopValue);

    // If there is enough space, we do not CAS and do not clear.
    __ cmpld(CCR0, RnewTopValue, RendValue);
    __ bgt(CCR0, allow_shared_alloc ? Lallocate_shared : Lslow_case);

    __ std(RnewTopValue, in_bytes(JavaThread::tlab_top_offset()), R16_thread);

    if (ZeroTLAB) {
      // The fields have already been cleared.
      __ b(Linitialize_header);
    } else {
      // Initialize both the header and fields.
      __ b(Linitialize_object);
    }

    // Fall through: TLAB was too small.
    if (allow_shared_alloc) {
      Register RtlabWasteLimitValue = R10_ARG8;
      Register RfreeValue = RnewTopValue;

      __ bind(Lallocate_shared);
      // Check if tlab should be discarded (refill_waste_limit >= free).
      __ ld(RtlabWasteLimitValue, in_bytes(JavaThread::tlab_refill_waste_limit_offset()), R16_thread);
      __ subf(RfreeValue, RoldTopValue, RendValue);
      __ srdi(RfreeValue, RfreeValue, LogHeapWordSize); // in dwords
      __ cmpld(CCR0, RtlabWasteLimitValue, RfreeValue);
      __ bge(CCR0, Lslow_case);

      // Increment waste limit to prevent getting stuck on this slow path.
      __ addi(RtlabWasteLimitValue, RtlabWasteLimitValue, (int)ThreadLocalAllocBuffer::refill_waste_limit_increment());
      __ std(RtlabWasteLimitValue, in_bytes(JavaThread::tlab_refill_waste_limit_offset()), R16_thread);
    }
    // else: No allocation in the shared eden. // fallthru: __ b(Lslow_case);
  }
  // else: Always go the slow path.

  // --------------------------------------------------------------------------
  // slow case
  __ bind(Lslow_case);
  call_VM(R17_tos, CAST_FROM_FN_PTR(address, InterpreterRuntime::_new), Rcpool, Rindex);

  if (UseTLAB) {
    __ b(Ldone);
    // --------------------------------------------------------------------------
    // Init1: Zero out newly allocated memory.

    if (!ZeroTLAB || allow_shared_alloc) {
      // Clear object fields.
      __ bind(Linitialize_object);

      // Initialize remaining object fields.
      Register Rbase = Rtags;
      __ addi(Rinstance_size, Rinstance_size, 7 - (int)sizeof(oopDesc));
      __ addi(Rbase, RallocatedObject, sizeof(oopDesc));
      __ srdi(Rinstance_size, Rinstance_size, 3);

      // Clear out the object, skipping the header. This also takes care of the zero-length case.
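      // Worked example (numbers ours, assuming a 16-byte object header): for a
      // 32-byte instance, Rinstance_size becomes (32 + 7 - 16) >> 3 = 2, i.e.
      // the two doublewords of field data starting at Rbase are cleared; the
      // "+ 7" rounds any partial doubleword up.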
      __ clear_memory_doubleword(Rbase, Rinstance_size);
      // fallthru: __ b(Linitialize_header);
    }

    // --------------------------------------------------------------------------
    // Init2: Initialize the header: mark, klass
    __ bind(Linitialize_header);

    // Init mark.
    if (UseBiasedLocking) {
      __ ld(Rscratch, in_bytes(Klass::prototype_header_offset()), RinstanceKlass);
    } else {
      __ load_const_optimized(Rscratch, markOopDesc::prototype(), R0);
    }
    __ std(Rscratch, oopDesc::mark_offset_in_bytes(), RallocatedObject);

    // Init klass.
    __ store_klass_gap(RallocatedObject);
    __ store_klass(RallocatedObject, RinstanceKlass, Rscratch); // klass (last for cms)

    // Check and trigger dtrace event.
    {
      SkipIfEqualZero skip_if(_masm, Rscratch, &DTraceAllocProbes);
      __ push(atos);
      __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_object_alloc));
      __ pop(atos);
    }
  }

  // continue
  __ bind(Ldone);

  // Must prevent reordering of stores for object initialization with stores that publish the new object.
  __ membar(Assembler::StoreStore);
}

void TemplateTable::newarray() {
  transition(itos, atos);

  __ lbz(R4, 1, R14_bcp);
  __ extsw(R5, R17_tos);
  call_VM(R17_tos, CAST_FROM_FN_PTR(address, InterpreterRuntime::newarray), R4, R5 /* size */);

  // Must prevent reordering of stores for object initialization with stores that publish the new object.
  __ membar(Assembler::StoreStore);
}

void TemplateTable::anewarray() {
  transition(itos, atos);

  __ get_constant_pool(R4);
  __ get_2_byte_integer_at_bcp(1, R5, InterpreterMacroAssembler::Unsigned);
  __ extsw(R6, R17_tos); // size
  call_VM(R17_tos, CAST_FROM_FN_PTR(address, InterpreterRuntime::anewarray), R4 /* pool */, R5 /* index */, R6 /* size */);

  // Must prevent reordering of stores for object initialization with stores that publish the new object.
  __ membar(Assembler::StoreStore);
}

// Allocate a multi-dimensional array.
void TemplateTable::multianewarray() {
  transition(vtos, atos);

  Register Rptr = R31; // Needs to survive C call.

  // Put ndims * wordSize into Rptr.
  __ lbz(Rptr, 3, R14_bcp);
  __ sldi(Rptr, Rptr, Interpreter::logStackElementSize);
  // Esp points past last_dim, so set R4 to the first_dim address.
  __ add(R4, Rptr, R15_esp);
  call_VM(R17_tos, CAST_FROM_FN_PTR(address, InterpreterRuntime::multianewarray), R4 /* first_size_address */);
  // Pop all dimensions off the stack.
  __ add(R15_esp, Rptr, R15_esp);

  // Must prevent reordering of stores for object initialization with stores that publish the new object.
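  // (Sketch, ours: a thread that loads the just-published array reference must
  //  not observe stale header or element values, so the StoreStore barrier
  //  below keeps all initializing stores ahead of any later publishing store.)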
  __ membar(Assembler::StoreStore);
}

void TemplateTable::arraylength() {
  transition(atos, itos);

  Label LnoException;
  __ verify_oop(R17_tos);
  __ null_check_throw(R17_tos, arrayOopDesc::length_offset_in_bytes(), R11_scratch1);
  __ lwa(R17_tos, arrayOopDesc::length_offset_in_bytes(), R17_tos);
}

// ============================================================================
// Typechecks

void TemplateTable::checkcast() {
  transition(atos, atos);

  Label Ldone, Lis_null, Lquicked, Lresolved;
  Register Roffset         = R6_ARG4,
           RobjKlass       = R4_ARG2,
           RspecifiedKlass = R5_ARG3,  // Generate_ClassCastException_verbose_handler will read value from this register.
           Rcpool          = R11_scratch1,
           Rtags           = R12_scratch2;

  // Null does not pass.
  __ cmpdi(CCR0, R17_tos, 0);
  __ beq(CCR0, Lis_null);

  // Get constant pool tag to find out if the bytecode has already been "quickened".
  __ get_cpool_and_tags(Rcpool, Rtags);

  __ get_2_byte_integer_at_bcp(1, Roffset, InterpreterMacroAssembler::Unsigned);

  __ addi(Rtags, Rtags, Array<u1>::base_offset_in_bytes());
  __ lbzx(Rtags, Rtags, Roffset);

  __ cmpdi(CCR0, Rtags, JVM_CONSTANT_Class);
  __ beq(CCR0, Lquicked);

  // Call into the VM to "quicken" checkcast (quicken_io_cc handles both instanceof and checkcast).
  __ push_ptr();  // for GC
  call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::quicken_io_cc));
  __ get_vm_result_2(RspecifiedKlass);
  __ pop_ptr();  // Restore receiver.
  __ b(Lresolved);

  // Extract target class from constant pool.
  __ bind(Lquicked);
  __ sldi(Roffset, Roffset, LogBytesPerWord);
  __ addi(Rcpool, Rcpool, sizeof(ConstantPool));
  __ isync(); // Order load of specified Klass wrt. tags.
  __ ldx(RspecifiedKlass, Rcpool, Roffset);

  // Do the checkcast.
  __ bind(Lresolved);
  // Get value klass in RobjKlass.
  __ load_klass(RobjKlass, R17_tos);
  // Generate a fast subtype check. Branches to Ldone if the cast succeeds; falls through on failure.
  __ gen_subtype_check(RobjKlass, RspecifiedKlass, /*3 temp regs*/ Roffset, Rcpool, Rtags, /*target if subtype*/ Ldone);

  // Not a subtype; so must throw exception.
  // Target class oop is in register R6_ARG4 == RspecifiedKlass by convention.
  __ load_dispatch_table(R11_scratch1, (address*)Interpreter::_throw_ClassCastException_entry);
  __ mtctr(R11_scratch1);
  __ bctr();

  // Profile the null case.
  __ align(32, 12);
  __ bind(Lis_null);
  __ profile_null_seen(R11_scratch1, Rtags); // Rtags used as scratch.

  __ align(32, 12);
  __ bind(Ldone);
}

// Output:
// - tos == 0: Obj was null or not an instance of class.
// - tos == 1: Obj was an instance of class.
void TemplateTable::instanceof() {
  transition(atos, itos);

  Label Ldone, Lis_null, Lquicked, Lresolved;
  Register Roffset         = R5_ARG3,
           RobjKlass       = R4_ARG2,
           RspecifiedKlass = R6_ARG4,  // Generate_ClassCastException_verbose_handler will expect the value in this register.
           Rcpool          = R11_scratch1,
           Rtags           = R12_scratch2;

  // Null does not pass.
  __ cmpdi(CCR0, R17_tos, 0);
  __ beq(CCR0, Lis_null);

  // Get constant pool tag to find out if the bytecode has already been "quickened".
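  // ("Quickened" means the constant pool entry was already resolved to a
  //  Klass: its tag equals JVM_CONSTANT_Class, so the Klass* can be loaded
  //  directly below instead of calling InterpreterRuntime::quicken_io_cc.)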
  __ get_cpool_and_tags(Rcpool, Rtags);

  __ get_2_byte_integer_at_bcp(1, Roffset, InterpreterMacroAssembler::Unsigned);

  __ addi(Rtags, Rtags, Array<u1>::base_offset_in_bytes());
  __ lbzx(Rtags, Rtags, Roffset);

  __ cmpdi(CCR0, Rtags, JVM_CONSTANT_Class);
  __ beq(CCR0, Lquicked);

  // Call into the VM to "quicken" instanceof.
  __ push_ptr();  // for GC
  call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::quicken_io_cc));
  __ get_vm_result_2(RspecifiedKlass);
  __ pop_ptr();  // Restore receiver.
  __ b(Lresolved);

  // Extract target class from constant pool.
  __ bind(Lquicked);
  __ sldi(Roffset, Roffset, LogBytesPerWord);
  __ addi(Rcpool, Rcpool, sizeof(ConstantPool));
  __ isync(); // Order load of specified Klass wrt. tags.
  __ ldx(RspecifiedKlass, Rcpool, Roffset);

  // Do the instanceof check.
  __ bind(Lresolved);
  // Get value klass in RobjKlass.
  __ load_klass(RobjKlass, R17_tos);
  // Generate a fast subtype check. Branches to Ldone with R17_tos == 1 if obj
  // is an instance of the specified class; falls through to set R17_tos = 0.
  __ li(R17_tos, 1);
  __ gen_subtype_check(RobjKlass, RspecifiedKlass, /*3 temp regs*/ Roffset, Rcpool, Rtags, /*target if subtype*/ Ldone);
  __ li(R17_tos, 0);

  if (ProfileInterpreter) {
    __ b(Ldone);
  }

  // Profile the null case.
  __ align(32, 12);
  __ bind(Lis_null);
  __ profile_null_seen(Rcpool, Rtags); // Rcpool and Rtags used as scratch.

  __ align(32, 12);
  __ bind(Ldone);
}

// =============================================================================
// Breakpoints

void TemplateTable::_breakpoint() {
  transition(vtos, vtos);

  // Get the unpatched byte code.
  __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::get_original_bytecode_at), R19_method, R14_bcp);
  __ mr(R31, R3_RET);

  // Post the breakpoint event.
  __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::_breakpoint), R19_method, R14_bcp);

  // Complete the execution of original bytecode.
  __ dispatch_Lbyte_code(vtos, R31, Interpreter::normal_table(vtos));
}

// =============================================================================
// Exceptions

void TemplateTable::athrow() {
  transition(atos, vtos);

  // Exception oop is in tos.
  __ verify_oop(R17_tos);

  __ null_check_throw(R17_tos, -1, R11_scratch1);

  // Throw exception interpreter entry expects exception oop to be in R3.
  __ mr(R3_RET, R17_tos);
  __ load_dispatch_table(R11_scratch1, (address*)Interpreter::throw_exception_entry());
  __ mtctr(R11_scratch1);
  __ bctr();
}

// =============================================================================
// Synchronization
// Searches the basic object lock list on the stack for a free slot
// and uses it to lock the object in tos.
//
// Recursive locking is enabled by exiting the search if the same
// object is already found in the list. Thus, a new BasicObjectLock
// is allocated "higher up" in the stack and is found first
// at the next monitor exit.
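//
// Rough pseudo code of the generated slot search (sketch, ours):
//
//   for (slot = topmost_monitor; slot <= monitor_base; slot = next(slot)) {
//     if (slot->obj == NULL)        { take this free slot;   break; }
//     if (slot->obj == obj_to_lock) { allocate a fresh slot; break; }  // recursive case
//   }
//   if (no slot taken) allocate a fresh slot;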
void TemplateTable::monitorenter() {
  transition(atos, vtos);

  __ verify_oop(R17_tos);

  Register Rcurrent_monitor  = R11_scratch1,
           Rcurrent_obj      = R12_scratch2,
           Robj_to_lock      = R17_tos,
           Rscratch1         = R3_ARG1,
           Rscratch2         = R4_ARG2,
           Rscratch3         = R5_ARG3,
           Rcurrent_obj_addr = R6_ARG4;

  // ------------------------------------------------------------------------------
  // Null pointer exception.
  __ null_check_throw(Robj_to_lock, -1, R11_scratch1);

  // Try to acquire a lock on the object.
  // Repeat until succeeded (i.e., until monitorenter returns true).

  // ------------------------------------------------------------------------------
  // Find a free slot in the monitor block.
  Label Lfound, Lexit, Lallocate_new;
  ConditionRegister found_free_slot = CCR0,
                    found_same_obj  = CCR1,
                    reached_limit   = CCR6;
  {
    Label Lloop, Lentry;
    Register Rlimit = Rcurrent_monitor;

    // Set up search loop - start with topmost monitor.
    __ add(Rcurrent_obj_addr, BasicObjectLock::obj_offset_in_bytes(), R26_monitor);

    __ ld(Rlimit, 0, R1_SP);
    __ addi(Rlimit, Rlimit, - (frame::ijava_state_size + frame::interpreter_frame_monitor_size_in_bytes() - BasicObjectLock::obj_offset_in_bytes())); // Monitor base

    // Check if any slot is present => short cut to allocation if not.
    __ cmpld(reached_limit, Rcurrent_obj_addr, Rlimit);
    __ bgt(reached_limit, Lallocate_new);

    // Pre-load topmost slot.
    __ ld(Rcurrent_obj, 0, Rcurrent_obj_addr);
    __ addi(Rcurrent_obj_addr, Rcurrent_obj_addr, frame::interpreter_frame_monitor_size() * wordSize);
    // The search loop.
    __ bind(Lloop);
    // Found free slot?
    __ cmpdi(found_free_slot, Rcurrent_obj, 0);
    // Is this entry for same obj? If so, stop the search and take the found
    // free slot or allocate a new one to enable recursive locking.
    __ cmpd(found_same_obj, Rcurrent_obj, Robj_to_lock);
    __ cmpld(reached_limit, Rcurrent_obj_addr, Rlimit);
    __ beq(found_free_slot, Lexit);
    __ beq(found_same_obj, Lallocate_new);
    __ bgt(reached_limit, Lallocate_new);
    // Check if last allocated BasicObjectLock reached.
    __ ld(Rcurrent_obj, 0, Rcurrent_obj_addr);
    __ addi(Rcurrent_obj_addr, Rcurrent_obj_addr, frame::interpreter_frame_monitor_size() * wordSize);
    // Next iteration if unchecked BasicObjectLocks exist on the stack.
    __ b(Lloop);
  }

  // ------------------------------------------------------------------------------
  // Check if we found a free slot.
  __ bind(Lexit);

  __ addi(Rcurrent_monitor, Rcurrent_obj_addr, -(frame::interpreter_frame_monitor_size() * wordSize) - BasicObjectLock::obj_offset_in_bytes());
  __ addi(Rcurrent_obj_addr, Rcurrent_obj_addr, - frame::interpreter_frame_monitor_size() * wordSize);
  __ b(Lfound);

  // We didn't find a free BasicObjectLock => allocate one.
  __ align(32, 12);
  __ bind(Lallocate_new);
  __ add_monitor_to_stack(false, Rscratch1, Rscratch2);
  __ mr(Rcurrent_monitor, R26_monitor);
  __ addi(Rcurrent_obj_addr, R26_monitor, BasicObjectLock::obj_offset_in_bytes());

  // ------------------------------------------------------------------------------
  // We now have a slot to lock.
  __ bind(Lfound);

  // Increment bcp to point to the next bytecode, so exception handling for async. exceptions works correctly.
  // The object has already been popped from the stack, so the expression stack looks correct.
  __ addi(R14_bcp, R14_bcp, 1);

  __ std(Robj_to_lock, 0, Rcurrent_obj_addr);
  __ lock_object(Rcurrent_monitor, Robj_to_lock);

  // Check if there's enough space on the stack for the monitors after locking.
  Label Lskip_stack_check;
  // Optimization: If the monitors stack section is less than a standard page size (4K), don't run
  // the stack check. There should be enough shadow pages to fit that in.
  __ ld(Rscratch3, 0, R1_SP);
  __ sub(Rscratch3, Rscratch3, R26_monitor);
  __ cmpdi(CCR0, Rscratch3, 4*K);
  __ blt(CCR0, Lskip_stack_check);

  DEBUG_ONLY(__ untested("stack overflow check during monitor enter");)
  __ li(Rscratch1, 0);
  __ generate_stack_overflow_check_with_compare_and_throw(Rscratch1, Rscratch2);

  __ align(32, 12);
  __ bind(Lskip_stack_check);

  // The bcp has already been incremented. Just need to dispatch to next instruction.
  __ dispatch_next(vtos);
}

void TemplateTable::monitorexit() {
  transition(atos, vtos);
  __ verify_oop(R17_tos);

  Register Rcurrent_monitor  = R11_scratch1,
           Rcurrent_obj      = R12_scratch2,
           Robj_to_lock      = R17_tos,
           Rcurrent_obj_addr = R3_ARG1,
           Rlimit            = R4_ARG2;
  Label Lfound, Lillegal_monitor_state;

  // Check corner case: unbalanced monitorEnter / Exit.
  __ ld(Rlimit, 0, R1_SP);
  __ addi(Rlimit, Rlimit, - (frame::ijava_state_size + frame::interpreter_frame_monitor_size_in_bytes())); // Monitor base

  // Null pointer check.
  __ null_check_throw(Robj_to_lock, -1, R11_scratch1);

  __ cmpld(CCR0, R26_monitor, Rlimit);
  __ bgt(CCR0, Lillegal_monitor_state);

  // Find the corresponding slot in the monitors stack section.
  {
    Label Lloop;

    // Start with topmost monitor.
    __ addi(Rcurrent_obj_addr, R26_monitor, BasicObjectLock::obj_offset_in_bytes());
    __ addi(Rlimit, Rlimit, BasicObjectLock::obj_offset_in_bytes());
    __ ld(Rcurrent_obj, 0, Rcurrent_obj_addr);
    __ addi(Rcurrent_obj_addr, Rcurrent_obj_addr, frame::interpreter_frame_monitor_size() * wordSize);

    __ bind(Lloop);
    // Is this entry for same obj?
    __ cmpd(CCR0, Rcurrent_obj, Robj_to_lock);
    __ beq(CCR0, Lfound);

    // Check if last allocated BasicObjectLock reached.

    __ ld(Rcurrent_obj, 0, Rcurrent_obj_addr);
    __ cmpld(CCR0, Rcurrent_obj_addr, Rlimit);
    __ addi(Rcurrent_obj_addr, Rcurrent_obj_addr, frame::interpreter_frame_monitor_size() * wordSize);

    // Next iteration if unchecked BasicObjectLocks exist on the stack.
    __ ble(CCR0, Lloop);
  }

  // Fell through without finding the basic obj lock => throw up!
  __ bind(Lillegal_monitor_state);
  call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_illegal_monitor_state_exception));
  __ should_not_reach_here();

  __ align(32, 12);
  __ bind(Lfound);
  __ addi(Rcurrent_monitor, Rcurrent_obj_addr,
          -(frame::interpreter_frame_monitor_size() * wordSize) - BasicObjectLock::obj_offset_in_bytes());
  __ unlock_object(Rcurrent_monitor);
}

// ============================================================================
// Wide bytecodes

// Wide instructions. Simply redirects to the wide entry point for that instruction.
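// For example, "wide iinc" is encoded as the byte sequence
//   0xC4 0x84 index_hi index_lo const_hi const_lo
// and the byte at bcp + 1 (0x84, iinc) selects the entry in
// Interpreter::_wentry_point that is dispatched to below.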
void TemplateTable::wide() {
  transition(vtos, vtos);

  const Register Rtable = R11_scratch1,
                 Rindex = R12_scratch2,
                 Rtmp   = R0;

  __ lbz(Rindex, 1, R14_bcp);

  __ load_dispatch_table(Rtable, Interpreter::_wentry_point);

  __ slwi(Rindex, Rindex, LogBytesPerWord);
  __ ldx(Rtmp, Rtable, Rindex);
  __ mtctr(Rtmp);
  __ bctr();
  // Note: the bcp increment step is part of the individual wide bytecode implementations.
}
#endif // !CC_INTERP