/*
 * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/interpreterRuntime.hpp"
#include "interpreter/interp_masm.hpp"
#include "interpreter/templateTable.hpp"
#include "memory/universe.hpp"
#include "oops/methodData.hpp"
#include "oops/objArrayKlass.hpp"
#include "oops/oop.inline.hpp"
#include "prims/methodHandles.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/synchronizer.hpp"
#include "utilities/macros.hpp"

#define __ _masm->

// Misc helpers

// Do an oop store like *(base + index + offset) = val
// index can be noreg.
static void do_oop_store(InterpreterMacroAssembler* _masm,
                         Register base,
                         Register index,
                         int offset,
                         Register val,
                         Register tmp,
                         BarrierSet::Name barrier,
                         bool precise) {
  assert(tmp != val && tmp != base && tmp != index, "register collision");
  assert(index == noreg || offset == 0, "only one offset");
  switch (barrier) {
#if INCLUDE_ALL_GCS
    case BarrierSet::G1BarrierSet:
      {
        // Load and record the previous value.
        __ g1_write_barrier_pre(base, index, offset,
                                noreg /* pre_val */,
                                tmp, true /*preserve_o_regs*/);

        // G1 barrier needs uncompressed oop for region cross check.
        Register new_val = val;
        if (UseCompressedOops && val != G0) {
          new_val = tmp;
          __ mov(val, new_val);
        }

        if (index == noreg) {
          assert(Assembler::is_simm13(offset), "fix this code");
          __ store_heap_oop(val, base, offset);
        } else {
          __ store_heap_oop(val, base, index);
        }

        // No need for post barrier if storing NULL
        if (val != G0) {
          if (precise) {
            if (index == noreg) {
              __ add(base, offset, base);
            } else {
              __ add(base, index, base);
            }
          }
          __ g1_write_barrier_post(base, new_val, tmp);
        }
      }
      break;
#endif // INCLUDE_ALL_GCS
    case BarrierSet::CardTableModRef:
      {
        if (index == noreg) {
          assert(Assembler::is_simm13(offset), "fix this code");
          __ store_heap_oop(val, base, offset);
        } else {
          __ store_heap_oop(val, base, index);
        }
        // No need for post barrier if storing NULL
        if (val != G0) {
          if (precise) {
            if (index == noreg) {
              __ add(base, offset, base);
            } else {
              __ add(base, index, base);
            }
          }
          __ card_write_barrier_post(base, val, tmp);
        }
      }
      break;
    case BarrierSet::ModRef:
      ShouldNotReachHere();
      break;
    default:
      ShouldNotReachHere();
  }
}
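// For orientation, the barrier-wrapped store above corresponds roughly to
// (a sketch, not the exact emitted instruction sequence):
//
//   pre_barrier(*(base + index + offset));   // G1 only: log the old value (SATB)
//   *(base + index + offset) = val;          // possibly a compressed-oop store
//   if (val != NULL) {
//     post_barrier(base, val);               // dirty the card / remembered set
//   }
//
// 'precise' means the post barrier must see the exact element address, hence
// the add() folding index/offset into base before the post-barrier call.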

//----------------------------------------------------------------------------------------------------
// Platform-dependent initialization

void TemplateTable::pd_initialize() {
  // (none)
}


//----------------------------------------------------------------------------------------------------
// Condition conversion
Assembler::Condition ccNot(TemplateTable::Condition cc) {
  switch (cc) {
    case TemplateTable::equal        : return Assembler::notEqual;
    case TemplateTable::not_equal    : return Assembler::equal;
    case TemplateTable::less         : return Assembler::greaterEqual;
    case TemplateTable::less_equal   : return Assembler::greater;
    case TemplateTable::greater      : return Assembler::lessEqual;
    case TemplateTable::greater_equal: return Assembler::less;
  }
  ShouldNotReachHere();
  return Assembler::zero;
}
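// Example reading of the table above: for 'ifeq' (TemplateTable::equal) the
// generated code branches on Assembler::notEqual -- the *negated* condition --
// which, as the if_cmp() callers further below suggest, sends the not-taken
// case around the branch-taken work (bumping Lbcp by the displacement).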

//----------------------------------------------------------------------------------------------------
// Miscellaneous helper routines


Address TemplateTable::at_bcp(int offset) {
  assert(_desc->uses_bcp(), "inconsistent uses_bcp information");
  return Address(Lbcp, offset);
}


void TemplateTable::patch_bytecode(Bytecodes::Code bc, Register bc_reg,
                                   Register temp_reg, bool load_bc_into_bc_reg/*=true*/,
                                   int byte_no) {
  // With sharing on, may need to test Method* flag.
  if (!RewriteBytecodes) return;
  Label L_patch_done;

  switch (bc) {
    case Bytecodes::_fast_aputfield:
    case Bytecodes::_fast_bputfield:
    case Bytecodes::_fast_zputfield:
    case Bytecodes::_fast_cputfield:
    case Bytecodes::_fast_dputfield:
    case Bytecodes::_fast_fputfield:
    case Bytecodes::_fast_iputfield:
    case Bytecodes::_fast_lputfield:
    case Bytecodes::_fast_sputfield:
      {
        // We skip bytecode quickening for putfield instructions when
        // the put_code written to the constant pool cache is zero.
        // This is required so that every execution of this instruction
        // calls out to InterpreterRuntime::resolve_get_put to do
        // additional, required work.
        assert(byte_no == f1_byte || byte_no == f2_byte, "byte_no out of range");
        assert(load_bc_into_bc_reg, "we use bc_reg as temp");
        __ get_cache_and_index_and_bytecode_at_bcp(bc_reg, temp_reg, temp_reg, byte_no, 1);
        __ set(bc, bc_reg);
        __ cmp_and_br_short(temp_reg, 0, Assembler::equal, Assembler::pn, L_patch_done);  // don't patch
      }
      break;
    default:
      assert(byte_no == -1, "sanity");
      if (load_bc_into_bc_reg) {
        __ set(bc, bc_reg);
      }
  }

  if (JvmtiExport::can_post_breakpoint()) {
    Label L_fast_patch;
    __ ldub(at_bcp(0), temp_reg);
    __ cmp_and_br_short(temp_reg, Bytecodes::_breakpoint, Assembler::notEqual, Assembler::pt, L_fast_patch);
    // perform the quickening, slowly, in the bowels of the breakpoint table
    __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::set_original_bytecode_at), Lmethod, Lbcp, bc_reg);
    __ ba_short(L_patch_done);
    __ bind(L_fast_patch);
  }

#ifdef ASSERT
  Bytecodes::Code orig_bytecode = Bytecodes::java_code(bc);
  Label L_okay;
  __ ldub(at_bcp(0), temp_reg);
  __ cmp(temp_reg, orig_bytecode);
  __ br(Assembler::equal, false, Assembler::pt, L_okay);
  __ delayed()->cmp(temp_reg, bc_reg);
  __ br(Assembler::equal, false, Assembler::pt, L_okay);
  __ delayed()->nop();
  __ stop("patching the wrong bytecode");
  __ bind(L_okay);
#endif

  // patch bytecode
  __ stb(bc_reg, at_bcp(0));
  __ bind(L_patch_done);
}

//----------------------------------------------------------------------------------------------------
// Individual instructions

void TemplateTable::nop() {
  transition(vtos, vtos);
  // nothing to do
}

void TemplateTable::shouldnotreachhere() {
  transition(vtos, vtos);
  __ stop("shouldnotreachhere bytecode");
}

void TemplateTable::aconst_null() {
  transition(vtos, atos);
  __ clr(Otos_i);
}


void TemplateTable::iconst(int value) {
  transition(vtos, itos);
  __ set(value, Otos_i);
}


void TemplateTable::lconst(int value) {
  transition(vtos, ltos);
  assert(value >= 0, "check this code");
  __ set(value, Otos_l);
}


void TemplateTable::fconst(int value) {
  transition(vtos, ftos);
  static float zero = 0.0, one = 1.0, two = 2.0;
  float* p;
  switch (value) {
    default: ShouldNotReachHere();
    case 0:  p = &zero;  break;
    case 1:  p = &one;   break;
    case 2:  p = &two;   break;
  }
  AddressLiteral a(p);
  __ sethi(a, G3_scratch);
  __ ldf(FloatRegisterImpl::S, G3_scratch, a.low10(), Ftos_f);
}


void TemplateTable::dconst(int value) {
  transition(vtos, dtos);
  static double zero = 0.0, one = 1.0;
  double* p;
  switch (value) {
    default: ShouldNotReachHere();
    case 0:  p = &zero;  break;
    case 1:  p = &one;   break;
  }
  AddressLiteral a(p);
  __ sethi(a, G3_scratch);
  __ ldf(FloatRegisterImpl::D, G3_scratch, a.low10(), Ftos_d);
}


// %%%%% Should factor most snippet templates across platforms

void TemplateTable::bipush() {
  transition(vtos, itos);
  __ ldsb(at_bcp(1), Otos_i);
}

void TemplateTable::sipush() {
  transition(vtos, itos);
  __ get_2_byte_integer_at_bcp(1, G3_scratch, Otos_i, InterpreterMacroAssembler::Signed);
}

void TemplateTable::ldc(bool wide) {
  transition(vtos, vtos);
  Label call_ldc, notInt, isString, notString, notClass, exit;

  if (wide) {
    __ get_2_byte_integer_at_bcp(1, G3_scratch, O1, InterpreterMacroAssembler::Unsigned);
  } else {
    __ ldub(Lbcp, 1, O1);
  }
  __ get_cpool_and_tags(O0, O2);

  const int base_offset = ConstantPool::header_size() * wordSize;
  const int tags_offset = Array<u1>::base_offset_in_bytes();

  // get type from tags
  __ add(O2, tags_offset, O2);
  __ ldub(O2, O1, O2);

  // unresolved class? If so, must resolve
  __ cmp_and_brx_short(O2, JVM_CONSTANT_UnresolvedClass, Assembler::equal, Assembler::pt, call_ldc);

  // unresolved class in error state
  __ cmp_and_brx_short(O2, JVM_CONSTANT_UnresolvedClassInError, Assembler::equal, Assembler::pn, call_ldc);

  __ cmp(O2, JVM_CONSTANT_Class);  // need to call vm to get java mirror of the class
  __ brx(Assembler::notEqual, true, Assembler::pt, notClass);
  __ delayed()->add(O0, base_offset, O0);

  __ bind(call_ldc);
  __ set(wide, O1);
  call_VM(Otos_i, CAST_FROM_FN_PTR(address, InterpreterRuntime::ldc), O1);
  __ push(atos);
  __ ba_short(exit);

  __ bind(notClass);
  // __ add(O0, base_offset, O0);
  __ sll(O1, LogBytesPerWord, O1);
  __ cmp(O2, JVM_CONSTANT_Integer);
  __ brx(Assembler::notEqual, true, Assembler::pt, notInt);
  __ delayed()->cmp(O2, JVM_CONSTANT_String);
  __ ld(O0, O1, Otos_i);
  __ push(itos);
  __ ba_short(exit);

  __ bind(notInt);
  // __ cmp(O2, JVM_CONSTANT_String);
  __ brx(Assembler::notEqual, true, Assembler::pt, notString);
  __ delayed()->ldf(FloatRegisterImpl::S, O0, O1, Ftos_f);
  __ bind(isString);
  __ stop("string should be rewritten to fast_aldc");
  __ ba_short(exit);

  __ bind(notString);
  // __ ldf(FloatRegisterImpl::S, O0, O1, Ftos_f);
  __ push(ftos);

  __ bind(exit);
}

// Fast path for caching oop constants.
// %%% We should use this to handle Class and String constants also.
// %%% It will simplify the ldc/primitive path considerably.
void TemplateTable::fast_aldc(bool wide) {
  transition(vtos, atos);

  int index_size = wide ? sizeof(u2) : sizeof(u1);
  Label resolved;

  // We are resolved if the resolved reference cache entry contains a
  // non-null object (CallSite, etc.)
  assert_different_registers(Otos_i, G3_scratch);
  __ get_cache_index_at_bcp(Otos_i, G3_scratch, 1, index_size);  // load index => G3_scratch
  __ load_resolved_reference_at_index(Otos_i, G3_scratch);
  __ tst(Otos_i);
  __ br(Assembler::notEqual, false, Assembler::pt, resolved);
  __ delayed()->set((int)bytecode(), O1);

  address entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_ldc);

  // first time invocation - must resolve first
  __ call_VM(Otos_i, entry, O1);
  __ bind(resolved);
  __ verify_oop(Otos_i);
}

void TemplateTable::ldc2_w() {
  transition(vtos, vtos);
  Label Long, exit;

  __ get_2_byte_integer_at_bcp(1, G3_scratch, O1, InterpreterMacroAssembler::Unsigned);
  __ get_cpool_and_tags(O0, O2);

  const int base_offset = ConstantPool::header_size() * wordSize;
  const int tags_offset = Array<u1>::base_offset_in_bytes();
  // get type from tags
  __ add(O2, tags_offset, O2);
  __ ldub(O2, O1, O2);

  __ sll(O1, LogBytesPerWord, O1);
  __ add(O0, O1, G3_scratch);

  __ cmp_and_brx_short(O2, JVM_CONSTANT_Double, Assembler::notEqual, Assembler::pt, Long);
  // A double can be placed at word-aligned locations in the constant pool.
  // Check out Conversions.java for an example.
  // Also ConstantPool::header_size() is 20, which makes it very difficult
  // to double-align double on the constant pool.  SG, 11/7/97
  __ ldf(FloatRegisterImpl::D, G3_scratch, base_offset, Ftos_d);
  __ push(dtos);
  __ ba_short(exit);

  __ bind(Long);
  __ ldx(G3_scratch, base_offset, Otos_l);
  __ push(ltos);

  __ bind(exit);
}
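// For reference: a constant pool slot's address is cpool + header + index*wordSize,
// which is what the sll(O1, LogBytesPerWord, O1) / add pairs above compute, with
// base_offset folded into the final load's displacement. E.g. on a 64-bit VM,
// slot 3 lives at O0 + (3 << LogBytesPerWord) + base_offset. (Illustrative
// arithmetic only.)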

void TemplateTable::locals_index(Register reg, int offset) {
  __ ldub(at_bcp(offset), reg);
}

void TemplateTable::locals_index_wide(Register reg) {
  // offset is 2, not 1, because Lbcp points to wide prefix code
  __ get_2_byte_integer_at_bcp(2, G4_scratch, reg, InterpreterMacroAssembler::Unsigned);
}

void TemplateTable::iload() {
  iload_internal();
}

void TemplateTable::nofast_iload() {
  iload_internal(may_not_rewrite);
}

void TemplateTable::iload_internal(RewriteControl rc) {
  transition(vtos, itos);
  // Rewrite iload,iload  pair into fast_iload2
  //         iload,caload pair into fast_icaload
  if (RewriteFrequentPairs && rc == may_rewrite) {
    Label rewrite, done;

    // get next byte
    __ ldub(at_bcp(Bytecodes::length_for(Bytecodes::_iload)), G3_scratch);

    // if _iload, wait to rewrite to iload2. We only want to rewrite the
    // last two iloads in a pair. Comparing against fast_iload means that
    // the next bytecode is neither an iload nor a caload, and therefore
    // an iload pair.
    __ cmp_and_br_short(G3_scratch, (int)Bytecodes::_iload, Assembler::equal, Assembler::pn, done);

    __ cmp(G3_scratch, (int)Bytecodes::_fast_iload);
    __ br(Assembler::equal, false, Assembler::pn, rewrite);
    __ delayed()->set(Bytecodes::_fast_iload2, G4_scratch);

    __ cmp(G3_scratch, (int)Bytecodes::_caload);
    __ br(Assembler::equal, false, Assembler::pn, rewrite);
    __ delayed()->set(Bytecodes::_fast_icaload, G4_scratch);

    __ set(Bytecodes::_fast_iload, G4_scratch);  // don't check again
    // rewrite
    // G4_scratch: fast bytecode
    __ bind(rewrite);
    patch_bytecode(Bytecodes::_iload, G4_scratch, G3_scratch, false);
    __ bind(done);
  }

  // Get the local value into tos
  locals_index(G3_scratch);
  __ access_local_int(G3_scratch, Otos_i);
}

void TemplateTable::fast_iload2() {
  transition(vtos, itos);
  locals_index(G3_scratch);
  __ access_local_int(G3_scratch, Otos_i);
  __ push_i();
  locals_index(G3_scratch, 3);  // get next bytecode's local index.
  __ access_local_int(G3_scratch, Otos_i);
}
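// fast_iload2 thus executes two iloads in one dispatch: the first local is
// pushed onto the expression stack and the second is left in tos. The offset 3
// reaches past the fast_iload2 opcode, its index byte, and the trailing
// original iload opcode to that second iload's index byte.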

void TemplateTable::fast_iload() {
  transition(vtos, itos);
  locals_index(G3_scratch);
  __ access_local_int(G3_scratch, Otos_i);
}

void TemplateTable::lload() {
  transition(vtos, ltos);
  locals_index(G3_scratch);
  __ access_local_long(G3_scratch, Otos_l);
}


void TemplateTable::fload() {
  transition(vtos, ftos);
  locals_index(G3_scratch);
  __ access_local_float(G3_scratch, Ftos_f);
}


void TemplateTable::dload() {
  transition(vtos, dtos);
  locals_index(G3_scratch);
  __ access_local_double(G3_scratch, Ftos_d);
}


void TemplateTable::aload() {
  transition(vtos, atos);
  locals_index(G3_scratch);
  __ access_local_ptr(G3_scratch, Otos_i);
}


void TemplateTable::wide_iload() {
  transition(vtos, itos);
  locals_index_wide(G3_scratch);
  __ access_local_int(G3_scratch, Otos_i);
}


void TemplateTable::wide_lload() {
  transition(vtos, ltos);
  locals_index_wide(G3_scratch);
  __ access_local_long(G3_scratch, Otos_l);
}


void TemplateTable::wide_fload() {
  transition(vtos, ftos);
  locals_index_wide(G3_scratch);
  __ access_local_float(G3_scratch, Ftos_f);
}


void TemplateTable::wide_dload() {
  transition(vtos, dtos);
  locals_index_wide(G3_scratch);
  __ access_local_double(G3_scratch, Ftos_d);
}


void TemplateTable::wide_aload() {
  transition(vtos, atos);
  locals_index_wide(G3_scratch);
  __ access_local_ptr(G3_scratch, Otos_i);
  __ verify_oop(Otos_i);
}


void TemplateTable::iaload() {
  transition(itos, itos);
  // Otos_i: index
  // tos: array
  __ index_check(O2, Otos_i, LogBytesPerInt, G3_scratch, O3);
  __ ld(O3, arrayOopDesc::base_offset_in_bytes(T_INT), Otos_i);
}


void TemplateTable::laload() {
  transition(itos, ltos);
  // Otos_i: index
  // O2: array
  __ index_check(O2, Otos_i, LogBytesPerLong, G3_scratch, O3);
  __ ld_long(O3, arrayOopDesc::base_offset_in_bytes(T_LONG), Otos_l);
}


void TemplateTable::faload() {
  transition(itos, ftos);
  // Otos_i: index
  // O2: array
  __ index_check(O2, Otos_i, LogBytesPerInt, G3_scratch, O3);
  __ ldf(FloatRegisterImpl::S, O3, arrayOopDesc::base_offset_in_bytes(T_FLOAT), Ftos_f);
}


void TemplateTable::daload() {
  transition(itos, dtos);
  // Otos_i: index
  // O2: array
  __ index_check(O2, Otos_i, LogBytesPerLong, G3_scratch, O3);
  __ ldf(FloatRegisterImpl::D, O3, arrayOopDesc::base_offset_in_bytes(T_DOUBLE), Ftos_d);
}
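// A note on the array-load templates above (a sketch of index_check's
// contract, inferred from its uses here): it pops the array oop into the
// first register, range-checks the index against the array length -- the
// failure path raises ArrayIndexOutOfBoundsException -- and leaves
// array + (index << scale) in the last register, so each load only adds the
// fixed element-base offset.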


void TemplateTable::aaload() {
  transition(itos, atos);
  // Otos_i: index
  // tos: array
  __ index_check(O2, Otos_i, UseCompressedOops ? 2 : LogBytesPerWord, G3_scratch, O3);
  __ load_heap_oop(O3, arrayOopDesc::base_offset_in_bytes(T_OBJECT), Otos_i);
  __ verify_oop(Otos_i);
}


void TemplateTable::baload() {
  transition(itos, itos);
  // Otos_i: index
  // tos: array
  __ index_check(O2, Otos_i, 0, G3_scratch, O3);
  __ ldsb(O3, arrayOopDesc::base_offset_in_bytes(T_BYTE), Otos_i);
}


void TemplateTable::caload() {
  transition(itos, itos);
  // Otos_i: index
  // tos: array
  __ index_check(O2, Otos_i, LogBytesPerShort, G3_scratch, O3);
  __ lduh(O3, arrayOopDesc::base_offset_in_bytes(T_CHAR), Otos_i);
}

void TemplateTable::fast_icaload() {
  transition(vtos, itos);
  // Otos_i: index
  // tos: array
  locals_index(G3_scratch);
  __ access_local_int(G3_scratch, Otos_i);
  __ index_check(O2, Otos_i, LogBytesPerShort, G3_scratch, O3);
  __ lduh(O3, arrayOopDesc::base_offset_in_bytes(T_CHAR), Otos_i);
}


void TemplateTable::saload() {
  transition(itos, itos);
  // Otos_i: index
  // tos: array
  __ index_check(O2, Otos_i, LogBytesPerShort, G3_scratch, O3);
  __ ldsh(O3, arrayOopDesc::base_offset_in_bytes(T_SHORT), Otos_i);
}


void TemplateTable::iload(int n) {
  transition(vtos, itos);
  __ ld(Llocals, Interpreter::local_offset_in_bytes(n), Otos_i);
}


void TemplateTable::lload(int n) {
  transition(vtos, ltos);
  assert(n+1 < Argument::n_register_parameters, "would need more code");
  __ load_unaligned_long(Llocals, Interpreter::local_offset_in_bytes(n+1), Otos_l);
}


void TemplateTable::fload(int n) {
  transition(vtos, ftos);
  assert(n < Argument::n_register_parameters, "would need more code");
  __ ldf(FloatRegisterImpl::S, Llocals, Interpreter::local_offset_in_bytes(n), Ftos_f);
}


void TemplateTable::dload(int n) {
  transition(vtos, dtos);
  FloatRegister dst = Ftos_d;
  __ load_unaligned_double(Llocals, Interpreter::local_offset_in_bytes(n+1), dst);
}


void TemplateTable::aload(int n) {
  transition(vtos, atos);
  __ ld_ptr(Llocals, Interpreter::local_offset_in_bytes(n), Otos_i);
}

void TemplateTable::aload_0() {
  aload_0_internal();
}

void TemplateTable::nofast_aload_0() {
  aload_0_internal(may_not_rewrite);
}

void TemplateTable::aload_0_internal(RewriteControl rc) {
  transition(vtos, atos);

  // According to bytecode histograms, the pairs:
  //
  // _aload_0, _fast_igetfield (itos)
  // _aload_0, _fast_agetfield (atos)
  // _aload_0, _fast_fgetfield (ftos)
  //
  // occur frequently. If RewriteFrequentPairs is set, the (slow) _aload_0
  // bytecode checks the next bytecode and then rewrites the current
  // bytecode into a pair bytecode; otherwise it rewrites the current
  // bytecode into _fast_aload_0 that doesn't do the pair check anymore.
  //
  if (RewriteFrequentPairs && rc == may_rewrite) {
    Label rewrite, done;

    // get next byte
    __ ldub(at_bcp(Bytecodes::length_for(Bytecodes::_aload_0)), G3_scratch);

    // if _getfield then wait with rewrite
    __ cmp_and_br_short(G3_scratch, (int)Bytecodes::_getfield, Assembler::equal, Assembler::pn, done);

    // if _igetfield then rewrite to _fast_iaccess_0
    assert(Bytecodes::java_code(Bytecodes::_fast_iaccess_0) == Bytecodes::_aload_0, "adjust fast bytecode def");
    __ cmp(G3_scratch, (int)Bytecodes::_fast_igetfield);
    __ br(Assembler::equal, false, Assembler::pn, rewrite);
    __ delayed()->set(Bytecodes::_fast_iaccess_0, G4_scratch);

    // if _agetfield then rewrite to _fast_aaccess_0
    assert(Bytecodes::java_code(Bytecodes::_fast_aaccess_0) == Bytecodes::_aload_0, "adjust fast bytecode def");
    __ cmp(G3_scratch, (int)Bytecodes::_fast_agetfield);
    __ br(Assembler::equal, false, Assembler::pn, rewrite);
    __ delayed()->set(Bytecodes::_fast_aaccess_0, G4_scratch);

    // if _fgetfield then rewrite to _fast_faccess_0
    assert(Bytecodes::java_code(Bytecodes::_fast_faccess_0) == Bytecodes::_aload_0, "adjust fast bytecode def");
    __ cmp(G3_scratch, (int)Bytecodes::_fast_fgetfield);
    __ br(Assembler::equal, false, Assembler::pn, rewrite);
    __ delayed()->set(Bytecodes::_fast_faccess_0, G4_scratch);

    // else rewrite to _fast_aload0
    assert(Bytecodes::java_code(Bytecodes::_fast_aload_0) == Bytecodes::_aload_0, "adjust fast bytecode def");
    __ set(Bytecodes::_fast_aload_0, G4_scratch);

    // rewrite
    // G4_scratch: fast bytecode
    __ bind(rewrite);
    patch_bytecode(Bytecodes::_aload_0, G4_scratch, G3_scratch, false);
    __ bind(done);
  }

  // Do actual aload_0 (must do this after patch_bytecode which might call VM and GC might change oop).
  aload(0);
}

void TemplateTable::istore() {
  transition(itos, vtos);
  locals_index(G3_scratch);
  __ store_local_int(G3_scratch, Otos_i);
}


void TemplateTable::lstore() {
  transition(ltos, vtos);
  locals_index(G3_scratch);
  __ store_local_long(G3_scratch, Otos_l);
}


void TemplateTable::fstore() {
  transition(ftos, vtos);
  locals_index(G3_scratch);
  __ store_local_float(G3_scratch, Ftos_f);
}


void TemplateTable::dstore() {
  transition(dtos, vtos);
  locals_index(G3_scratch);
  __ store_local_double(G3_scratch, Ftos_d);
}


void TemplateTable::astore() {
  transition(vtos, vtos);
  __ load_ptr(0, Otos_i);
  __ inc(Lesp, Interpreter::stackElementSize);
  __ verify_oop_or_return_address(Otos_i, G3_scratch);
  locals_index(G3_scratch);
  __ store_local_ptr(G3_scratch, Otos_i);
}


void TemplateTable::wide_istore() {
  transition(vtos, vtos);
  __ pop_i();
  locals_index_wide(G3_scratch);
  __ store_local_int(G3_scratch, Otos_i);
}


void TemplateTable::wide_lstore() {
  transition(vtos, vtos);
  __ pop_l();
  locals_index_wide(G3_scratch);
  __ store_local_long(G3_scratch, Otos_l);
}


void TemplateTable::wide_fstore() {
  transition(vtos, vtos);
  __ pop_f();
  locals_index_wide(G3_scratch);
  __ store_local_float(G3_scratch, Ftos_f);
}


void TemplateTable::wide_dstore() {
  transition(vtos, vtos);
  __ pop_d();
  locals_index_wide(G3_scratch);
  __ store_local_double(G3_scratch, Ftos_d);
}


void TemplateTable::wide_astore() {
  transition(vtos, vtos);
  __ load_ptr(0, Otos_i);
  __ inc(Lesp, Interpreter::stackElementSize);
  __ verify_oop_or_return_address(Otos_i, G3_scratch);
  locals_index_wide(G3_scratch);
  __ store_local_ptr(G3_scratch, Otos_i);
}


void TemplateTable::iastore() {
  transition(itos, vtos);
  __ pop_i(O2);  // index
  // Otos_i: val
  // O3: array
  __ index_check(O3, O2, LogBytesPerInt, G3_scratch, O2);
  __ st(Otos_i, O2, arrayOopDesc::base_offset_in_bytes(T_INT));
}


void TemplateTable::lastore() {
  transition(ltos, vtos);
  __ pop_i(O2);  // index
  // Otos_l: val
  // O3: array
  __ index_check(O3, O2, LogBytesPerLong, G3_scratch, O2);
  __ st_long(Otos_l, O2, arrayOopDesc::base_offset_in_bytes(T_LONG));
}


void TemplateTable::fastore() {
  transition(ftos, vtos);
  __ pop_i(O2);  // index
  // Ftos_f: val
  // O3: array
  __ index_check(O3, O2, LogBytesPerInt, G3_scratch, O2);
  __ stf(FloatRegisterImpl::S, Ftos_f, O2, arrayOopDesc::base_offset_in_bytes(T_FLOAT));
}


void TemplateTable::dastore() {
  transition(dtos, vtos);
  __ pop_i(O2);  // index
  // Ftos_d: val
  // O3: array
  __ index_check(O3, O2, LogBytesPerLong, G3_scratch, O2);
  __ stf(FloatRegisterImpl::D, Ftos_d, O2, arrayOopDesc::base_offset_in_bytes(T_DOUBLE));
}

void TemplateTable::aastore() {
  Label store_ok, is_null, done;
  transition(vtos, vtos);
  __ ld_ptr(Lesp, Interpreter::expr_offset_in_bytes(0), Otos_i);
  __ ld(Lesp, Interpreter::expr_offset_in_bytes(1), O2);      // get index
  __ ld_ptr(Lesp, Interpreter::expr_offset_in_bytes(2), O3);  // get array
  // Otos_i: val
  // O2: index
  // O3: array
  __ verify_oop(Otos_i);
  __ index_check_without_pop(O3, O2, UseCompressedOops ? 2 : LogBytesPerWord, G3_scratch, O1);

  // do array store check - check for NULL value first
  __ br_null_short(Otos_i, Assembler::pn, is_null);

  __ load_klass(O3, O4);      // get array klass
  __ load_klass(Otos_i, O5);  // get value klass

  // do fast instanceof cache test

  __ ld_ptr(O4, in_bytes(ObjArrayKlass::element_klass_offset()), O4);

  assert(Otos_i == O0, "just checking");

  // Otos_i: value
  // O1:     addr - offset
  // O2:     index
  // O3:     array
  // O4:     array element klass
  // O5:     value klass

  // Address element(O1, 0, arrayOopDesc::base_offset_in_bytes(T_OBJECT));

  // Generate a fast subtype check. Branch to store_ok if no
  // failure. Throw if failure.
  __ gen_subtype_check(O5, O4, G3_scratch, G4_scratch, G1_scratch, store_ok);

  // Not a subtype; so must throw exception
  __ throw_if_not_x(Assembler::never, Interpreter::_throw_ArrayStoreException_entry, G3_scratch);

  // Store is OK.
  __ bind(store_ok);
  do_oop_store(_masm, O1, noreg, arrayOopDesc::base_offset_in_bytes(T_OBJECT), Otos_i, G3_scratch, _bs->kind(), true);

  __ ba(done);
  __ delayed()->inc(Lesp, 3 * Interpreter::stackElementSize);  // adj sp (pops array, index and value)

  __ bind(is_null);
  do_oop_store(_masm, O1, noreg, arrayOopDesc::base_offset_in_bytes(T_OBJECT), G0, G4_scratch, _bs->kind(), true);

  __ profile_null_seen(G3_scratch);
  __ inc(Lesp, 3 * Interpreter::stackElementSize);  // adj sp (pops array, index and value)
  __ bind(done);
}


void TemplateTable::bastore() {
  transition(itos, vtos);
  __ pop_i(O2);  // index
  // Otos_i: val
  // O2: index
  // O3: array
  __ index_check(O3, O2, 0, G3_scratch, O2);
  // Need to check whether array is boolean or byte
  // since both types share the bastore bytecode.
  __ load_klass(O3, G4_scratch);
  __ ld(G4_scratch, in_bytes(Klass::layout_helper_offset()), G4_scratch);
  __ set(Klass::layout_helper_boolean_diffbit(), G3_scratch);
  __ andcc(G3_scratch, G4_scratch, G0);
  Label L_skip;
  __ br(Assembler::zero, false, Assembler::pn, L_skip);
  __ delayed()->nop();
  __ and3(Otos_i, 1, Otos_i);  // if it is a T_BOOLEAN array, mask the stored value to 0/1
  __ bind(L_skip);
  __ stb(Otos_i, O2, arrayOopDesc::base_offset_in_bytes(T_BYTE));
}
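// The T_BOOLEAN/T_BYTE distinction above works because the array klass's
// layout_helper encodes the element type, and the encodings for boolean[] and
// byte[] differ in exactly the bit returned by layout_helper_boolean_diffbit();
// when that bit is set the store targets a boolean[] and the value is masked
// to 0/1.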


void TemplateTable::castore() {
  transition(itos, vtos);
  __ pop_i(O2);  // index
  // Otos_i: val
  // O3: array
  __ index_check(O3, O2, LogBytesPerShort, G3_scratch, O2);
  __ sth(Otos_i, O2, arrayOopDesc::base_offset_in_bytes(T_CHAR));
}


void TemplateTable::sastore() {
  // %%%%% Factor across platform
  castore();
}


void TemplateTable::istore(int n) {
  transition(itos, vtos);
  __ st(Otos_i, Llocals, Interpreter::local_offset_in_bytes(n));
}


void TemplateTable::lstore(int n) {
  transition(ltos, vtos);
  assert(n+1 < Argument::n_register_parameters, "only handle register cases");
  __ store_unaligned_long(Otos_l, Llocals, Interpreter::local_offset_in_bytes(n+1));
}


void TemplateTable::fstore(int n) {
  transition(ftos, vtos);
  assert(n < Argument::n_register_parameters, "only handle register cases");
  __ stf(FloatRegisterImpl::S, Ftos_f, Llocals, Interpreter::local_offset_in_bytes(n));
}


void TemplateTable::dstore(int n) {
  transition(dtos, vtos);
  FloatRegister src = Ftos_d;
  __ store_unaligned_double(src, Llocals, Interpreter::local_offset_in_bytes(n+1));
}


void TemplateTable::astore(int n) {
  transition(vtos, vtos);
  __ load_ptr(0, Otos_i);
  __ inc(Lesp, Interpreter::stackElementSize);
  __ verify_oop_or_return_address(Otos_i, G3_scratch);
  __ store_local_ptr(n, Otos_i);
}


void TemplateTable::pop() {
  transition(vtos, vtos);
  __ inc(Lesp, Interpreter::stackElementSize);
}


void TemplateTable::pop2() {
  transition(vtos, vtos);
  __ inc(Lesp, 2 * Interpreter::stackElementSize);
}


void TemplateTable::dup() {
  transition(vtos, vtos);
  // stack: ..., a
  // load a and tag
  __ load_ptr(0, Otos_i);
  __ push_ptr(Otos_i);
  // stack: ..., a, a
}


void TemplateTable::dup_x1() {
  transition(vtos, vtos);
  // stack: ..., a, b
  __ load_ptr( 1, G3_scratch);  // get a
  __ load_ptr( 0, Otos_l1);     // get b
  __ store_ptr(1, Otos_l1);     // put b
  __ store_ptr(0, G3_scratch);  // put a - like swap
  __ push_ptr(Otos_l1);         // push b
  // stack: ..., b, a, b
}


void TemplateTable::dup_x2() {
  transition(vtos, vtos);
  // stack: ..., a, b, c
  // get c and push on stack, reuse registers
  __ load_ptr( 0, G3_scratch);  // get c
  __ push_ptr(G3_scratch);      // push c with tag
  // stack: ..., a, b, c, c  (c in reg)  (Lesp - 4)
  // (stack offsets n+1 now)
  __ load_ptr( 3, Otos_l1);     // get a
  __ store_ptr(3, G3_scratch);  // put c at 3
  // stack: ..., c, b, c, c  (a in reg)
  __ load_ptr( 2, G3_scratch);  // get b
  __ store_ptr(2, Otos_l1);     // put a at 2
  // stack: ..., c, a, c, c  (b in reg)
  __ store_ptr(1, G3_scratch);  // put b at 1
  // stack: ..., c, a, b, c
}


void TemplateTable::dup2() {
  transition(vtos, vtos);
  __ load_ptr(1, G3_scratch);  // get a
  __ load_ptr(0, Otos_l1);     // get b
  __ push_ptr(G3_scratch);     // push a
  __ push_ptr(Otos_l1);        // push b
  // stack: ..., a, b, a, b
}


void TemplateTable::dup2_x1() {
  transition(vtos, vtos);
  // stack: ..., a, b, c
  __ load_ptr( 1, Lscratch);    // get b
  __ load_ptr( 2, Otos_l1);     // get a
  __ store_ptr(2, Lscratch);    // put b at a
  // stack: ..., b, b, c
  __ load_ptr( 0, G3_scratch);  // get c
  __ store_ptr(1, G3_scratch);  // put c at b
  // stack: ..., b, c, c
  __ store_ptr(0, Otos_l1);     // put a at c
  // stack: ..., b, c, a
  __ push_ptr(Lscratch);        // push b
  __ push_ptr(G3_scratch);      // push c
  // stack: ..., b, c, a, b, c
}


// The spec says that these types can be a mixture of category 1 (1 word)
// types and/or category 2 types (longs and doubles)
void TemplateTable::dup2_x2() {
  transition(vtos, vtos);
  // stack: ..., a, b, c, d
  __ load_ptr( 1, Lscratch);    // get c
  __ load_ptr( 3, Otos_l1);     // get a
  __ store_ptr(3, Lscratch);    // put c at 3
  __ store_ptr(1, Otos_l1);     // put a at 1
  // stack: ..., c, b, a, d
  __ load_ptr( 2, G3_scratch);  // get b
  __ load_ptr( 0, Otos_l1);     // get d
  __ store_ptr(0, G3_scratch);  // put b at 0
  __ store_ptr(2, Otos_l1);     // put d at 2
  // stack: ..., c, d, a, b
  __ push_ptr(Lscratch);        // push c
  __ push_ptr(Otos_l1);         // push d
  // stack: ..., c, d, a, b, c, d
}


void TemplateTable::swap() {
  transition(vtos, vtos);
  // stack: ..., a, b
  __ load_ptr( 1, G3_scratch);  // get a
  __ load_ptr( 0, Otos_l1);     // get b
  __ store_ptr(0, G3_scratch);  // put b
  __ store_ptr(1, Otos_l1);     // put a
  // stack: ..., b, a
}


void TemplateTable::iop2(Operation op) {
  transition(itos, itos);
  __ pop_i(O1);
  switch (op) {
    case add:  __ add(O1, Otos_i, Otos_i);   break;
    case sub:  __ sub(O1, Otos_i, Otos_i);   break;
    // %%%%% Mul may not exist: better to call .mul?
    case mul:  __ smul(O1, Otos_i, Otos_i);  break;
    case _and: __ and3(O1, Otos_i, Otos_i);  break;
    case _or:  __ or3(O1, Otos_i, Otos_i);   break;
    case _xor: __ xor3(O1, Otos_i, Otos_i);  break;
    case shl:  __ sll(O1, Otos_i, Otos_i);   break;
    case shr:  __ sra(O1, Otos_i, Otos_i);   break;
    case ushr: __ srl(O1, Otos_i, Otos_i);   break;
    default: ShouldNotReachHere();
  }
}


void TemplateTable::lop2(Operation op) {
  transition(ltos, ltos);
  __ pop_l(O2);
  switch (op) {
    case add:  __ add(O2, Otos_l, Otos_l);   break;
    case sub:  __ sub(O2, Otos_l, Otos_l);   break;
    case _and: __ and3(O2, Otos_l, Otos_l);  break;
    case _or:  __ or3(O2, Otos_l, Otos_l);   break;
    case _xor: __ xor3(O2, Otos_l, Otos_l);  break;
    default: ShouldNotReachHere();
  }
}


void TemplateTable::idiv() {
  // %%%%% Later: For SPARC/V7 call .sdiv library routine,
  // %%%%% Use ldsw...sdivx on pure V9 ABI. 64 bit safe.

  transition(itos, itos);
  __ pop_i(O1);  // get 1st op

  // Y contains upper 32 bits of result, set it to 0 or all ones
  __ wry(G0);
  __ mov(~0, G3_scratch);

  __ tst(O1);
  Label neg;
  __ br(Assembler::negative, true, Assembler::pn, neg);
  __ delayed()->wry(G3_scratch);
  __ bind(neg);

  Label ok;
  __ tst(Otos_i);
  __ throw_if_not_icc(Assembler::notZero, Interpreter::_throw_ArithmeticException_entry, G3_scratch);

  const int min_int = 0x80000000;
  Label regular;
  __ cmp(Otos_i, -1);
  __ br(Assembler::notEqual, false, Assembler::pt, regular);
  // Don't put set in delay slot
  // Set will turn into multiple instructions in 64 bit mode
  __ delayed()->nop();
  __ set(min_int, G4_scratch);
  Label done;
  __ cmp(O1, G4_scratch);
  __ br(Assembler::equal, true, Assembler::pt, done);
  __ delayed()->mov(O1, Otos_i);  // (mov only executed if branch taken)

  __ bind(regular);
  __ sdiv(O1, Otos_i, Otos_i);  // note: irem uses O1 after this instruction!
  __ bind(done);
}
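// Java semantics note on the min_int check above: Integer.MIN_VALUE / -1
// overflows, and the JLS requires the result to wrap to Integer.MIN_VALUE
// rather than trap, so that dividend is passed through unchanged instead of
// going to sdiv.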


void TemplateTable::irem() {
  transition(itos, itos);
  __ mov(Otos_i, O2);  // save divisor
  idiv();              // %%%% Hack: exploits fact that idiv leaves dividend in O1
  __ smul(Otos_i, O2, Otos_i);
  __ sub(O1, Otos_i, Otos_i);
}


void TemplateTable::lmul() {
  transition(ltos, ltos);
  __ pop_l(O2);
  __ mulx(Otos_l, O2, Otos_l);
}


void TemplateTable::ldiv() {
  transition(ltos, ltos);

  // check for zero
  __ pop_l(O2);
  __ tst(Otos_l);
  __ throw_if_not_xcc(Assembler::notZero, Interpreter::_throw_ArithmeticException_entry, G3_scratch);
  __ sdivx(O2, Otos_l, Otos_l);
}


void TemplateTable::lrem() {
  transition(ltos, ltos);

  // check for zero
  __ pop_l(O2);
  __ tst(Otos_l);
  __ throw_if_not_xcc(Assembler::notZero, Interpreter::_throw_ArithmeticException_entry, G3_scratch);
  __ sdivx(O2, Otos_l, Otos_l2);
  __ mulx(Otos_l2, Otos_l, Otos_l2);
  __ sub(O2, Otos_l2, Otos_l);
}


void TemplateTable::lshl() {
  transition(itos, ltos);  // %%%% could optimize, fill delay slot or opt for ultra

  __ pop_l(O2);  // shift value in O2, O3
  __ sllx(O2, Otos_i, Otos_l);
}


void TemplateTable::lshr() {
  transition(itos, ltos);  // %%%% see lshl comment

  __ pop_l(O2);  // shift value in O2, O3
  __ srax(O2, Otos_i, Otos_l);
}


void TemplateTable::lushr() {
  transition(itos, ltos);  // %%%% see lshl comment

  __ pop_l(O2);  // shift value in O2, O3
  __ srlx(O2, Otos_i, Otos_l);
}


void TemplateTable::fop2(Operation op) {
  transition(ftos, ftos);
  switch (op) {
    case add: __ pop_f(F4); __ fadd(FloatRegisterImpl::S, F4, Ftos_f, Ftos_f); break;
    case sub: __ pop_f(F4); __ fsub(FloatRegisterImpl::S, F4, Ftos_f, Ftos_f); break;
    case mul: __ pop_f(F4); __ fmul(FloatRegisterImpl::S, F4, Ftos_f, Ftos_f); break;
    case div: __ pop_f(F4); __ fdiv(FloatRegisterImpl::S, F4, Ftos_f, Ftos_f); break;
    case rem:
      assert(Ftos_f == F0, "just checking");
      // LP64 calling conventions use F1, F3 for passing 2 floats
      __ pop_f(F1);
      __ fmov(FloatRegisterImpl::S, Ftos_f, F3);
      __ call_VM_leaf(Lscratch, CAST_FROM_FN_PTR(address, SharedRuntime::frem));
      assert(Ftos_f == F0, "fix this code");
      break;

    default: ShouldNotReachHere();
  }
}


void TemplateTable::dop2(Operation op) {
  transition(dtos, dtos);
  switch (op) {
    case add: __ pop_d(F4); __ fadd(FloatRegisterImpl::D, F4, Ftos_d, Ftos_d); break;
    case sub: __ pop_d(F4); __ fsub(FloatRegisterImpl::D, F4, Ftos_d, Ftos_d); break;
    case mul: __ pop_d(F4); __ fmul(FloatRegisterImpl::D, F4, Ftos_d, Ftos_d); break;
    case div: __ pop_d(F4); __ fdiv(FloatRegisterImpl::D, F4, Ftos_d, Ftos_d); break;
    case rem:
      // Pass arguments in D0, D2
      __ fmov(FloatRegisterImpl::D, Ftos_f, F2);
      __ pop_d(F0);
      __ call_VM_leaf(Lscratch, CAST_FROM_FN_PTR(address, SharedRuntime::drem));
      assert(Ftos_d == F0, "fix this code");
      break;

    default: ShouldNotReachHere();
  }
}


void TemplateTable::ineg() {
  transition(itos, itos);
  __ neg(Otos_i);
}


void TemplateTable::lneg() {
  transition(ltos, ltos);
  __ sub(G0, Otos_l, Otos_l);
}


void TemplateTable::fneg() {
  transition(ftos, ftos);
  __ fneg(FloatRegisterImpl::S, Ftos_f, Ftos_f);
}


void TemplateTable::dneg() {
  transition(dtos, dtos);
  __ fneg(FloatRegisterImpl::D, Ftos_f, Ftos_f);
}


void TemplateTable::iinc() {
  transition(vtos, vtos);
  locals_index(G3_scratch);
  __ ldsb(Lbcp, 2, O2);  // load constant
  __ access_local_int(G3_scratch, Otos_i);
  __ add(Otos_i, O2, Otos_i);
  __ st(Otos_i, G3_scratch, 0);  // access_local_int puts E.A. in G3_scratch
}


void TemplateTable::wide_iinc() {
  transition(vtos, vtos);
  locals_index_wide(G3_scratch);
  __ get_2_byte_integer_at_bcp(4, O2, O3, InterpreterMacroAssembler::Signed);
  __ access_local_int(G3_scratch, Otos_i);
  __ add(Otos_i, O3, Otos_i);
  __ st(Otos_i, G3_scratch, 0);  // access_local_int puts E.A. in G3_scratch
}
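// Bytecode layout being decoded above: iinc is [opcode][local index][signed
// 8-bit constant], hence the ldsb at bcp+2; wide iinc is [wide][iinc][2-byte
// index][2-byte signed constant], hence the 2-byte reads at bcp+2 and bcp+4.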


void TemplateTable::convert() {
  // %%%%% Factor this first part across platforms
#ifdef ASSERT
  TosState tos_in  = ilgl;
  TosState tos_out = ilgl;
  switch (bytecode()) {
    case Bytecodes::_i2l: // fall through
    case Bytecodes::_i2f: // fall through
    case Bytecodes::_i2d: // fall through
    case Bytecodes::_i2b: // fall through
    case Bytecodes::_i2c: // fall through
    case Bytecodes::_i2s: tos_in = itos; break;
    case Bytecodes::_l2i: // fall through
    case Bytecodes::_l2f: // fall through
    case Bytecodes::_l2d: tos_in = ltos; break;
    case Bytecodes::_f2i: // fall through
    case Bytecodes::_f2l: // fall through
    case Bytecodes::_f2d: tos_in = ftos; break;
    case Bytecodes::_d2i: // fall through
    case Bytecodes::_d2l: // fall through
    case Bytecodes::_d2f: tos_in = dtos; break;
    default: ShouldNotReachHere();
  }
  switch (bytecode()) {
    case Bytecodes::_l2i: // fall through
    case Bytecodes::_f2i: // fall through
    case Bytecodes::_d2i: // fall through
    case Bytecodes::_i2b: // fall through
    case Bytecodes::_i2c: // fall through
    case Bytecodes::_i2s: tos_out = itos; break;
    case Bytecodes::_i2l: // fall through
    case Bytecodes::_f2l: // fall through
    case Bytecodes::_d2l: tos_out = ltos; break;
    case Bytecodes::_i2f: // fall through
    case Bytecodes::_l2f: // fall through
    case Bytecodes::_d2f: tos_out = ftos; break;
    case Bytecodes::_i2d: // fall through
    case Bytecodes::_l2d: // fall through
    case Bytecodes::_f2d: tos_out = dtos; break;
    default: ShouldNotReachHere();
  }
  transition(tos_in, tos_out);
#endif


  // Conversion
  Label done;
  switch (bytecode()) {
    case Bytecodes::_i2l:
      // Sign extend the 32 bits
      __ sra(Otos_i, 0, Otos_l);
      break;

    case Bytecodes::_i2f:
      __ st(Otos_i, __ d_tmp);
      __ ldf(FloatRegisterImpl::S, __ d_tmp, F0);
      __ fitof(FloatRegisterImpl::S, F0, Ftos_f);
      break;

    case Bytecodes::_i2d:
      __ st(Otos_i, __ d_tmp);
      __ ldf(FloatRegisterImpl::S, __ d_tmp, F0);
      __ fitof(FloatRegisterImpl::D, F0, Ftos_f);
      break;

    case Bytecodes::_i2b:
      __ sll(Otos_i, 24, Otos_i);
      __ sra(Otos_i, 24, Otos_i);
      break;

    case Bytecodes::_i2c:
      __ sll(Otos_i, 16, Otos_i);
      __ srl(Otos_i, 16, Otos_i);
      break;

    case Bytecodes::_i2s:
      __ sll(Otos_i, 16, Otos_i);
      __ sra(Otos_i, 16, Otos_i);
      break;

    case Bytecodes::_l2i:
      // Sign-extend into the high 32 bits
      __ sra(Otos_l, 0, Otos_i);
      break;

    case Bytecodes::_l2f:
    case Bytecodes::_l2d:
      __ st_long(Otos_l, __ d_tmp);
      __ ldf(FloatRegisterImpl::D, __ d_tmp, Ftos_d);

      if (bytecode() == Bytecodes::_l2f) {
        __ fxtof(FloatRegisterImpl::S, Ftos_d, Ftos_f);
      } else {
        __ fxtof(FloatRegisterImpl::D, Ftos_d, Ftos_d);
      }
      break;

    case Bytecodes::_f2i: {
        Label isNaN;
        // result must be 0 if value is NaN; test by comparing value to itself
        __ fcmp(FloatRegisterImpl::S, Assembler::fcc0, Ftos_f, Ftos_f);
        __ fb(Assembler::f_unordered, true, Assembler::pn, isNaN);
        __ delayed()->clr(Otos_i);  // NaN
        __ ftoi(FloatRegisterImpl::S, Ftos_f, F30);
        __ stf(FloatRegisterImpl::S, F30, __ d_tmp);
        __ ld(__ d_tmp, Otos_i);
        __ bind(isNaN);
      }
      break;

    case Bytecodes::_f2l:
      // must uncache tos
      __ push_f();
      __ pop_f(F1);
      __ call_VM_leaf(Lscratch, CAST_FROM_FN_PTR(address, SharedRuntime::f2l));
      break;

    case Bytecodes::_f2d:
      __ ftof(FloatRegisterImpl::S, FloatRegisterImpl::D, Ftos_f, Ftos_f);
      break;

    case Bytecodes::_d2i:
    case Bytecodes::_d2l:
      // must uncache tos
      __ push_d();
      // LP64 calling conventions pass first double arg in D0
      __ pop_d(Ftos_d);
      __ call_VM_leaf(Lscratch,
                      bytecode() == Bytecodes::_d2i
                        ? CAST_FROM_FN_PTR(address, SharedRuntime::d2i)
                        : CAST_FROM_FN_PTR(address, SharedRuntime::d2l));
      break;

    case Bytecodes::_d2f:
      __ ftof(FloatRegisterImpl::D, FloatRegisterImpl::S, Ftos_d, Ftos_f);
      break;

    default: ShouldNotReachHere();
  }
  __ bind(done);
}


void TemplateTable::lcmp() {
  transition(ltos, itos);

  __ pop_l(O1);  // pop off value 1, value 2 is in O0
  __ lcmp(O1, Otos_l, Otos_i);
}


void TemplateTable::float_cmp(bool is_float, int unordered_result) {

  if (is_float) __ pop_f(F2);
  else          __ pop_d(F2);

  assert(Ftos_f == F0 && Ftos_d == F0, "alias checking:");

  __ float_cmp(is_float, unordered_result, F2, F0, Otos_i);
}

void TemplateTable::branch(bool is_jsr, bool is_wide) {
  // Note: on SPARC, we use InterpreterMacroAssembler::if_cmp also.
  __ verify_thread();

  const Register O2_bumped_count = O2;
  __ profile_taken_branch(G3_scratch, O2_bumped_count);

  // get (wide) offset to O1_disp
  const Register O1_disp = O1;
  if (is_wide) __ get_4_byte_integer_at_bcp(1, G4_scratch, O1_disp, InterpreterMacroAssembler::set_CC);
  else         __ get_2_byte_integer_at_bcp(1, G4_scratch, O1_disp, InterpreterMacroAssembler::Signed, InterpreterMacroAssembler::set_CC);

  // Handle all the JSR stuff here, then exit.
  // It's much shorter and cleaner than intermingling with the
  // non-JSR normal-branch stuff occurring below.
  if (is_jsr) {
    // compute return address as bci in Otos_i
    __ ld_ptr(Lmethod, Method::const_offset(), G3_scratch);
    __ sub(Lbcp, G3_scratch, G3_scratch);
    __ sub(G3_scratch, in_bytes(ConstMethod::codes_offset()) - (is_wide ? 5 : 3), Otos_i);

    // Bump Lbcp to target of JSR
    __ add(Lbcp, O1_disp, Lbcp);
    // Push returnAddress for "ret" on stack
    __ push_ptr(Otos_i);
    // And away we go!
    __ dispatch_next(vtos, 0, true);
    return;
  }

  // Normal (non-jsr) branch handling

  // Save the current Lbcp
  const Register l_cur_bcp = Lscratch;
  __ mov(Lbcp, l_cur_bcp);

  bool increment_invocation_counter_for_backward_branches = UseCompiler && UseLoopCounter;
  if (increment_invocation_counter_for_backward_branches) {
    Label Lforward;
    // check branch direction
    __ br(Assembler::positive, false, Assembler::pn, Lforward);
    // Bump bytecode pointer by displacement (take the branch)
    __ delayed()->add(O1_disp, Lbcp, Lbcp);  // add to bc addr

    const Register G3_method_counters = G3_scratch;
    __ get_method_counters(Lmethod, G3_method_counters, Lforward);

    if (TieredCompilation) {
      Label Lno_mdo, Loverflow;
      int increment = InvocationCounter::count_increment;
      if (ProfileInterpreter) {
        // If no method data exists, go to profile_continue.
        __ ld_ptr(Lmethod, Method::method_data_offset(), G4_scratch);
        __ br_null_short(G4_scratch, Assembler::pn, Lno_mdo);

        // Increment backedge counter in the MDO
        Address mdo_backedge_counter(G4_scratch, in_bytes(MethodData::backedge_counter_offset()) +
                                                 in_bytes(InvocationCounter::counter_offset()));
        Address mask(G4_scratch, in_bytes(MethodData::backedge_mask_offset()));
        __ increment_mask_and_jump(mdo_backedge_counter, increment, mask, G3_scratch, O0,
                                   (UseOnStackReplacement ? Assembler::notZero : Assembler::always), &Lforward);
        __ ba_short(Loverflow);
      }

      // If there's no MDO, increment counter in MethodCounters*
      __ bind(Lno_mdo);
      Address backedge_counter(G3_method_counters,
                               in_bytes(MethodCounters::backedge_counter_offset()) +
                               in_bytes(InvocationCounter::counter_offset()));
      Address mask(G3_method_counters, in_bytes(MethodCounters::backedge_mask_offset()));
      __ increment_mask_and_jump(backedge_counter, increment, mask, G4_scratch, O0,
                                 (UseOnStackReplacement ? Assembler::notZero : Assembler::always), &Lforward);
      __ bind(Loverflow);

      // notify point for loop, pass branch bytecode
      __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::frequency_counter_overflow), l_cur_bcp);

      // Was an OSR adapter generated?
      // O0 = osr nmethod
      __ br_null_short(O0, Assembler::pn, Lforward);

      // Has the nmethod been invalidated already?
      __ ldub(O0, nmethod::state_offset(), O2);
      __ cmp_and_br_short(O2, nmethod::in_use, Assembler::notEqual, Assembler::pn, Lforward);

      // migrate the interpreter frame off of the stack

      __ mov(G2_thread, L7);
      // save nmethod
      __ mov(O0, L6);
      __ set_last_Java_frame(SP, noreg);
      __ call_VM_leaf(noreg, CAST_FROM_FN_PTR(address, SharedRuntime::OSR_migration_begin), L7);
      __ reset_last_Java_frame();
      __ mov(L7, G2_thread);

      // move OSR nmethod to I1
      __ mov(L6, I1);

      // OSR buffer to I0
      __ mov(O0, I0);

      // remove the interpreter frame
      __ restore(I5_savedSP, 0, SP);

      // Jump to the osr code.
      __ ld_ptr(O1, nmethod::osr_entry_point_offset(), O2);
      __ jmp(O2, G0);
      __ delayed()->nop();

    } else { // not TieredCompilation
      // Update Backedge branch separately from invocations
      const Register G4_invoke_ctr = G4;
      __ increment_backedge_counter(G3_method_counters, G4_invoke_ctr, G1_scratch);
      if (ProfileInterpreter) {
        __ test_invocation_counter_for_mdp(G4_invoke_ctr, G3_method_counters, G1_scratch, Lforward);
        if (UseOnStackReplacement) {
          __ test_backedge_count_for_osr(O2_bumped_count, G3_method_counters, l_cur_bcp, G1_scratch);
        }
      } else {
        if (UseOnStackReplacement) {
          __ test_backedge_count_for_osr(G4_invoke_ctr, G3_method_counters, l_cur_bcp, G1_scratch);
        }
      }
    }

    __ bind(Lforward);
  } else {
    // Bump bytecode pointer by displacement (take the branch)
    __ add(O1_disp, Lbcp, Lbcp);  // add to bc addr
  }

  // continue with bytecode @ target
  // %%%%% Like Intel, could speed things up by moving bytecode fetch to code above,
  // %%%%% and changing dispatch_next to dispatch_only
  __ dispatch_next(vtos, 0, true);
}


// Note Condition in argument is TemplateTable::Condition
// arg scope is within class scope

void TemplateTable::if_0cmp(Condition cc) {
  // no pointers, integer only!
  transition(itos, vtos);
  // assume branch is more often taken than not (loops use backward branches)
  __ cmp(Otos_i, 0);
  __ if_cmp(ccNot(cc), false);
}


void TemplateTable::if_icmp(Condition cc) {
  transition(itos, vtos);
  __ pop_i(O1);
  __ cmp(O1, Otos_i);
  __ if_cmp(ccNot(cc), false);
}


void TemplateTable::if_nullcmp(Condition cc) {
  transition(atos, vtos);
  __ tst(Otos_i);
  __ if_cmp(ccNot(cc), true);
}


void TemplateTable::if_acmp(Condition cc) {
  transition(atos, vtos);
  __ pop_ptr(O1);
  __ verify_oop(O1);
  __ verify_oop(Otos_i);
  __ cmp(O1, Otos_i);
  __ if_cmp(ccNot(cc), true);
}


void TemplateTable::ret() {
  transition(vtos, vtos);
  locals_index(G3_scratch);
  __ access_local_returnAddress(G3_scratch, Otos_i);
  // Otos_i contains the bci, compute the bcp from that

#ifdef ASSERT
  // jsr result was labeled as an 'itos' not an 'atos' because we cannot GC
  // the result.  The return address (really a BCI) was stored with an
  // 'astore' because JVM specs claim it's a pointer-sized thing.  Hence in
  // the 64-bit build the 32-bit BCI is actually in the low bits of a 64-bit
  // loaded value.
  { Label zzz;
    __ set(65536, G3_scratch);
    __ cmp(Otos_i, G3_scratch);
    __ bp(Assembler::lessEqualUnsigned, false, Assembler::xcc, Assembler::pn, zzz);
    __ delayed()->nop();
    __ stop("BCI is in the wrong register half?");
    __ bind(zzz);
  }
#endif

  __ profile_ret(vtos, Otos_i, G4_scratch);

  __ ld_ptr(Lmethod, Method::const_offset(), G3_scratch);
  __ add(G3_scratch, Otos_i, G3_scratch);
  __ add(G3_scratch, in_bytes(ConstMethod::codes_offset()), Lbcp);
  __ dispatch_next(vtos, 0, true);
}


void TemplateTable::wide_ret() {
  transition(vtos, vtos);
  locals_index_wide(G3_scratch);
  __ access_local_returnAddress(G3_scratch, Otos_i);
  // Otos_i contains the bci, compute the bcp from that

  __ profile_ret(vtos, Otos_i, G4_scratch);

  __ ld_ptr(Lmethod, Method::const_offset(), G3_scratch);
  __ add(G3_scratch, Otos_i, G3_scratch);
  __ add(G3_scratch, in_bytes(ConstMethod::codes_offset()), Lbcp);
  __ dispatch_next(vtos, 0, true);
}


void TemplateTable::tableswitch() {
  transition(itos, vtos);
  Label default_case, continue_execution;

  // align bcp
  __ add(Lbcp, BytesPerInt, O1);
  __ and3(O1, -BytesPerInt, O1);
  // load lo, hi
  __ ld(O1, 1 * BytesPerInt, O2);  // Low Byte
  __ ld(O1, 2 * BytesPerInt, O3);  // High Byte
  // Sign extend the 32 bits
  __ sra(Otos_i, 0, Otos_i);

  // check against lo & hi
  __ cmp(Otos_i, O2);
  __ br(Assembler::less, false, Assembler::pn, default_case);
  __ delayed()->cmp(Otos_i, O3);
  __ br(Assembler::greater, false, Assembler::pn, default_case);
  // lookup dispatch offset
  __ delayed()->sub(Otos_i, O2, O2);
  __ profile_switch_case(O2, O3, G3_scratch, G4_scratch);
  __ sll(O2, LogBytesPerInt, O2);
  __ add(O2, 3 * BytesPerInt, O2);
  __ ba(continue_execution);
  __ delayed()->ld(O1, O2, O2);
  // handle default
  __ bind(default_case);
  __ profile_switch_default(O3);
  __ ld(O1, 0, O2);  // get default offset
  // continue execution
  __ bind(continue_execution);
  __ add(Lbcp, O2, Lbcp);
  __ dispatch_next(vtos, 0, true);
}
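// tableswitch operand layout after the 4-byte alignment above: default offset
// (4 bytes), lo (4), hi (4), then (hi - lo + 1) 4-byte jump offsets; the
// 'add(O2, 3 * BytesPerInt, O2)' skips the three header words to index the
// offset table.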


void TemplateTable::lookupswitch() {
  transition(itos, itos);
  __ stop("lookupswitch bytecode should have been rewritten");
}

void TemplateTable::fast_linearswitch() {
  transition(itos, vtos);
  Label loop_entry, loop, found, continue_execution;
  // align bcp
  __ add(Lbcp, BytesPerInt, O1);
  __ and3(O1, -BytesPerInt, O1);
  // set counter
  __ ld(O1, BytesPerInt, O2);
  __ sll(O2, LogBytesPerInt + 1, O2);  // in word-pairs
  __ add(O1, 2 * BytesPerInt, O3);     // set first pair addr
  __ ba(loop_entry);
  __ delayed()->add(O3, O2, O2);       // counter now points past last pair

  // table search
  __ bind(loop);
  __ cmp(O4, Otos_i);
  __ br(Assembler::equal, true, Assembler::pn, found);
  __ delayed()->ld(O3, BytesPerInt, O4);  // offset -> O4
  __ inc(O3, 2 * BytesPerInt);

  __ bind(loop_entry);
  __ cmp(O2, O3);
  __ brx(Assembler::greaterUnsigned, true, Assembler::pt, loop);
  __ delayed()->ld(O3, 0, O4);

  // default case
  __ ld(O1, 0, O4);  // get default offset
  if (ProfileInterpreter) {
    __ profile_switch_default(O3);
    __ ba_short(continue_execution);
  }

  // entry found -> get offset
  __ bind(found);
  if (ProfileInterpreter) {
    __ sub(O3, O1, O3);
    __ sub(O3, 2 * BytesPerInt, O3);
    __ srl(O3, LogBytesPerInt + 1, O3);  // in word-pairs
    __ profile_switch_case(O3, O1, O2, G3_scratch);

    __ bind(continue_execution);
  }
  __ add(Lbcp, O4, Lbcp);
  __ dispatch_next(vtos, 0, true);
}


void TemplateTable::fast_binaryswitch() {
  transition(itos, vtos);
  // Implementation using the following core algorithm: (copied from Intel)
  //
  // int binary_search(int key, LookupswitchPair* array, int n) {
  //   // Binary search according to "Methodik des Programmierens" by
  //   // Edsger W. Dijkstra and W.H.J. Feijen, Addison Wesley Germany 1985.
  //   int i = 0;
  //   int j = n;
  //   while (i+1 < j) {
  //     // invariant P: 0 <= i < j <= n and (a[i] <= key < a[j] or Q)
  //     // with      Q: for all i: 0 <= i < n: key < a[i]
  //     // where a stands for the array and assuming that the (inexisting)
  //     // element a[n] is infinitely big.
  //     int h = (i + j) >> 1;
  //     // i < h < j
  //     if (key < array[h].fast_match()) {
  //       j = h;
  //     } else {
  //       i = h;
  //     }
  //   }
  //   // R: a[i] <= key < a[i+1] or Q
  //   // (i.e., if key is within array, i is the correct index)
  //   return i;
  // }

  // register allocation
  assert(Otos_i == O0, "alias checking");
  const Register Rkey     = Otos_i;  // already set (tosca)
  const Register Rarray   = O1;
  const Register Ri       = O2;
  const Register Rj       = O3;
  const Register Rh       = O4;
  const Register Rscratch = O5;

  const int log_entry_size = 3;
  const int entry_size = 1 << log_entry_size;

  Label found;
  // Find Array start
  __ add(Lbcp, 3 * BytesPerInt, Rarray);
  __ and3(Rarray, -BytesPerInt, Rarray);
  // initialize i & j (in delay slot)
  __ clr(Ri);

  // and start
  Label entry;
  __ ba(entry);
  __ delayed()->ld(Rarray, -BytesPerInt, Rj);
  // (Rj is already in the native byte-ordering.)
  // binary search loop
  { Label loop;
    __ bind(loop);
    // int h = (i + j) >> 1;
    __ sra(Rh, 1, Rh);
    // if (key < array[h].fast_match()) {
    //   j = h;
    // } else {
    //   i = h;
    // }
    __ sll(Rh, log_entry_size, Rscratch);
    __ ld(Rarray, Rscratch, Rscratch);
    // (Rscratch is already in the native byte-ordering.)
    __ cmp(Rkey, Rscratch);
    __ movcc(Assembler::less,         false, Assembler::icc, Rh, Rj); // j = h if (key <  array[h].fast_match())
    __ movcc(Assembler::greaterEqual, false, Assembler::icc, Rh, Ri); // i = h if (key >= array[h].fast_match())

    // while (i+1 < j)
    __ bind(entry);
    __ add(Ri, 1, Rscratch);
    __ cmp(Rscratch, Rj);
    __ br(Assembler::less, true, Assembler::pt, loop);
    __ delayed()->add(Ri, Rj, Rh); // start h = i + j >> 1;
  }

  // end of binary search, result index is i (must check again!)
  Label default_case;
  Label continue_execution;
  if (ProfileInterpreter) {
    __ mov(Ri, Rh); // Save index in i for profiling
  }
  __ sll(Ri, log_entry_size, Ri);
  __ ld(Rarray, Ri, Rscratch);
  // (Rscratch is already in the native byte-ordering.)
  __ cmp(Rkey, Rscratch);
  __ br(Assembler::notEqual, true, Assembler::pn, default_case);
  __ delayed()->ld(Rarray, -2 * BytesPerInt, Rj); // load default offset -> j

  // entry found -> j = offset
  __ inc(Ri, BytesPerInt);
  __ profile_switch_case(Rh, Rj, Rscratch, Rkey);
  __ ld(Rarray, Ri, Rj);
  // (Rj is already in the native byte-ordering.)

  if (ProfileInterpreter) {
    __ ba_short(continue_execution);
  }

  __ bind(default_case); // fall through (if not profiling)
  __ profile_switch_default(Ri);

  __ bind(continue_execution);
  __ add(Lbcp, Rj, Lbcp);
  __ dispatch_next(vtos, 0, true);
}


void TemplateTable::_return(TosState state) {
  transition(state, state);
  assert(_desc->calls_vm(), "inconsistent calls_vm information");

  if (_desc->bytecode() == Bytecodes::_return_register_finalizer) {
    assert(state == vtos, "only valid state");
    __ mov(G0, G3_scratch);
    __ access_local_ptr(G3_scratch, Otos_i);
    __ load_klass(Otos_i, O2);
    __ set(JVM_ACC_HAS_FINALIZER, G3);
    __ ld(O2, in_bytes(Klass::access_flags_offset()), O2);
    __ andcc(G3, O2, G0);
    Label skip_register_finalizer;
    __ br(Assembler::zero, false, Assembler::pn, skip_register_finalizer);
    __ delayed()->nop();

    // Call out to do finalizer registration
    __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::register_finalizer), Otos_i);

    __ bind(skip_register_finalizer);
  }

  if (SafepointMechanism::uses_thread_local_poll() && _desc->bytecode() != Bytecodes::_return_register_finalizer) {
    Label no_safepoint;
    __ ldx(Address(G2_thread, Thread::polling_page_offset()), G3_scratch, 0);
    __ btst(SafepointMechanism::poll_bit(), G3_scratch);
    __ br(Assembler::zero, false, Assembler::pt, no_safepoint);
    __ delayed()->nop();
    __ push(state);
    __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::at_safepoint));
    __ pop(state);
    __ bind(no_safepoint);
  }

  // Narrow result if state is itos but result type is smaller.
  // Need to narrow in the return bytecode rather than in generate_return_entry
  // since compiled code callers expect the result to already be narrowed.
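  // For example, a method declared to return boolean may leave an arbitrary
  // 32-bit int in Otos_i; narrowing truncates it to the canonical 0/1
  // encoding (and byte/char/short results to their declared widths).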
  if (state == itos) {
    __ narrow(Otos_i);
  }
  __ remove_activation(state, /* throw_monitor_exception */ true);

  // The caller's SP was adjusted upon method entry to accommodate
  // the callee's non-argument locals. Undo that adjustment.
  __ ret();                             // return to caller
  __ delayed()->restore(I5_savedSP, G0, SP);
}


// ----------------------------------------------------------------------------
// Volatile variables demand their effects be made known to all CPUs in
// order.  Store buffers on most chips allow reads & writes to reorder; the
// JMM's ReadAfterWrite.java test fails in -Xint mode without some kind of
// memory barrier (i.e., it's not sufficient that the interpreter does not
// reorder volatile references, the hardware also must not reorder them).
//
// According to the new Java Memory Model (JMM):
// (1) All volatiles are serialized with respect to each other.
// ALSO reads & writes act as acquire & release, so:
// (2) A read cannot let unrelated NON-volatile memory refs that happen after
//     the read float up to before the read.  It's OK for non-volatile memory
//     refs that happen before the volatile read to float down below it.
// (3) Similarly, a volatile write cannot let unrelated NON-volatile memory
//     refs that happen BEFORE the write float down to after the write.  It's
//     OK for non-volatile memory refs that happen after the volatile write
//     to float up before it.
//
// We only put in barriers around volatile refs (they are expensive), not
// _between_ memory refs (that would require us to track the flavor of the
// previous memory refs).  Requirements (2) and (3) require some barriers
// before volatile stores and after volatile loads.  These nearly cover
// requirement (1) but miss the volatile-store-volatile-load case.  This final
// case is placed after volatile-stores although it could just as well go
// before volatile-loads.
void TemplateTable::volatile_barrier(Assembler::Membar_mask_bits order_constraint) {
  // Helper function to insert an is-volatile test and memory barrier.
  // All current SPARC implementations run in TSO, needing only StoreLoad.
  if ((order_constraint & Assembler::StoreLoad) == 0) return;
  __ membar(order_constraint);
}

// ----------------------------------------------------------------------------
void TemplateTable::resolve_cache_and_index(int byte_no,
                                            Register Rcache,
                                            Register index,
                                            size_t index_size) {
  // Depends on cpCacheOop layout!

  Label resolved;
  Bytecodes::Code code = bytecode();
  switch (code) {
  case Bytecodes::_nofast_getfield: code = Bytecodes::_getfield; break;
  case Bytecodes::_nofast_putfield: code = Bytecodes::_putfield; break;
  default: break;
  }

  assert(byte_no == f1_byte || byte_no == f2_byte, "byte_no out of range");
  __ get_cache_and_index_and_bytecode_at_bcp(Rcache, index, Lbyte_code, byte_no, 1, index_size);
  __ cmp(Lbyte_code, code);  // have we resolved this bytecode?
  __ br(Assembler::equal, false, Assembler::pt, resolved);
  __ delayed()->set(code, O1);

  address entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_from_cache);
  // first time invocation - must resolve first
  __ call_VM(noreg, entry, O1);
  // Update registers with resolved info
  __ get_cache_and_index_at_bcp(Rcache, index, 1, index_size);
  __ bind(resolved);
}

void TemplateTable::load_invoke_cp_cache_entry(int byte_no,
                                               Register method,
                                               Register itable_index,
                                               Register flags,
                                               bool is_invokevirtual,
                                               bool is_invokevfinal,
                                               bool is_invokedynamic) {
  // Uses both G3_scratch and G4_scratch
  Register cache = G3_scratch;
  Register index = G4_scratch;
  assert_different_registers(cache, method, itable_index);

  // determine constant pool cache field offsets
  assert(is_invokevirtual == (byte_no == f2_byte), "is_invokevirtual flag redundant");
  const int method_offset = in_bytes(
      ConstantPoolCache::base_offset() +
      ((byte_no == f2_byte)
       ? ConstantPoolCacheEntry::f2_offset()
       : ConstantPoolCacheEntry::f1_offset()
      )
    );
  const int flags_offset = in_bytes(ConstantPoolCache::base_offset() +
                                    ConstantPoolCacheEntry::flags_offset());
  // access constant pool cache fields
  const int index_offset = in_bytes(ConstantPoolCache::base_offset() +
                                    ConstantPoolCacheEntry::f2_offset());

  if (is_invokevfinal) {
    __ get_cache_and_index_at_bcp(cache, index, 1);
    __ ld_ptr(Address(cache, method_offset), method);
  } else {
    size_t index_size = (is_invokedynamic ? sizeof(u4) : sizeof(u2));
    resolve_cache_and_index(byte_no, cache, index, index_size);
    __ ld_ptr(Address(cache, method_offset), method);
  }

  if (itable_index != noreg) {
    // pick up itable or appendix index from f2 also:
    __ ld_ptr(Address(cache, index_offset), itable_index);
  }
  __ ld_ptr(Address(cache, flags_offset), flags);
}

// The Rcache register must be set before the call.
void TemplateTable::load_field_cp_cache_entry(Register Robj,
                                              Register Rcache,
                                              Register index,
                                              Register Roffset,
                                              Register Rflags,
                                              bool is_static) {
  assert_different_registers(Rcache, Rflags, Roffset);

  ByteSize cp_base_offset = ConstantPoolCache::base_offset();

  __ ld_ptr(Rcache, cp_base_offset + ConstantPoolCacheEntry::flags_offset(), Rflags);
  __ ld_ptr(Rcache, cp_base_offset + ConstantPoolCacheEntry::f2_offset(), Roffset);
  if (is_static) {
    __ ld_ptr(Rcache, cp_base_offset + ConstantPoolCacheEntry::f1_offset(), Robj);
    const int mirror_offset = in_bytes(Klass::java_mirror_offset());
    __ ld_ptr(Robj, mirror_offset, Robj);
    __ resolve_oop_handle(Robj);
  }
}

// The registers Rcache and index are expected to be set before the call.
// The correct values of the Rcache and index registers are preserved.
void TemplateTable::jvmti_post_field_access(Register Rcache,
                                            Register index,
                                            bool is_static,
                                            bool has_tos) {
  ByteSize cp_base_offset = ConstantPoolCache::base_offset();

  if (JvmtiExport::can_post_field_access()) {
    // Check to see if a field access watch has been set before we take
    // the time to call into the VM.
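    // (The watch count is a global counter that JVMTI bumps whenever a
    // field access watch is set, so the common unwatched case costs only
    // the load and compare below.)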
    Label Label1;
    assert_different_registers(Rcache, index, G1_scratch);
    AddressLiteral get_field_access_count_addr(JvmtiExport::get_field_access_count_addr());
    __ load_contents(get_field_access_count_addr, G1_scratch);
    __ cmp_and_br_short(G1_scratch, 0, Assembler::equal, Assembler::pt, Label1);

    __ add(Rcache, in_bytes(cp_base_offset), Rcache);

    if (is_static) {
      __ clr(Otos_i);
    } else {
      if (has_tos) {
        // save object pointer before call_VM() clobbers it
        __ push_ptr(Otos_i);  // put object on tos where GC wants it.
      } else {
        // Load top of stack (do not pop the value off the stack);
        __ ld_ptr(Lesp, Interpreter::expr_offset_in_bytes(0), Otos_i);
      }
      __ verify_oop(Otos_i);
    }
    // Otos_i: object pointer or NULL if static
    // Rcache: cache entry pointer
    __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_field_access),
               Otos_i, Rcache);
    if (!is_static && has_tos) {
      __ pop_ptr(Otos_i);  // restore object pointer
      __ verify_oop(Otos_i);
    }
    __ get_cache_and_index_at_bcp(Rcache, index, 1);
    __ bind(Label1);
  }
}

void TemplateTable::getfield_or_static(int byte_no, bool is_static, RewriteControl rc) {
  transition(vtos, vtos);

  Register Rcache  = G3_scratch;
  Register index   = G4_scratch;
  Register Rclass  = Rcache;
  Register Roffset = G4_scratch;
  Register Rflags  = G1_scratch;
  ByteSize cp_base_offset = ConstantPoolCache::base_offset();

  resolve_cache_and_index(byte_no, Rcache, index, sizeof(u2));
  jvmti_post_field_access(Rcache, index, is_static, false);
  load_field_cp_cache_entry(Rclass, Rcache, index, Roffset, Rflags, is_static);

  if (!is_static) {
    pop_and_check_object(Rclass);
  } else {
    __ verify_oop(Rclass);
  }

  Label exit;

  Assembler::Membar_mask_bits membar_bits =
    Assembler::Membar_mask_bits(Assembler::LoadLoad | Assembler::LoadStore);

  if (__ membar_has_effect(membar_bits)) {
    // Get volatile flag
    __ set((1 << ConstantPoolCacheEntry::is_volatile_shift), Lscratch);
    __ and3(Rflags, Lscratch, Lscratch);
  }

  Label checkVolatile;

  // compute field type
  Label notByte, notBool, notInt, notShort, notChar, notLong, notFloat, notObj;
  __ srl(Rflags, ConstantPoolCacheEntry::tos_state_shift, Rflags);
  // Make sure we don't need to mask Rflags after the above shift
  ConstantPoolCacheEntry::verify_tos_state_shift();

  // Check atos before itos for getstatic, more likely (in Queens at least)
  __ cmp(Rflags, atos);
  __ br(Assembler::notEqual, false, Assembler::pt, notObj);
  __ delayed()->cmp(Rflags, itos);

  // atos
  __ load_heap_oop(Rclass, Roffset, Otos_i);
  __ verify_oop(Otos_i);
  __ push(atos);
  if (!is_static && rc == may_rewrite) {
    patch_bytecode(Bytecodes::_fast_agetfield, G3_scratch, G4_scratch);
  }
  __ ba(checkVolatile);
  __ delayed()->tst(Lscratch);

  __ bind(notObj);

  // cmp(Rflags, itos);
  __ br(Assembler::notEqual, false, Assembler::pt, notInt);
  __ delayed()->cmp(Rflags, ltos);

  // itos
  __ ld(Rclass, Roffset, Otos_i);
  __ push(itos);
  if (!is_static && rc == may_rewrite) {
    patch_bytecode(Bytecodes::_fast_igetfield, G3_scratch, G4_scratch);
  }
  __ ba(checkVolatile);
  __ delayed()->tst(Lscratch);

  __ bind(notInt);

  // cmp(Rflags, ltos);
  __ br(Assembler::notEqual, false, Assembler::pt, notLong);
  __ delayed()->cmp(Rflags, btos);

  // ltos
  // load must be atomic
  __ ld_long(Rclass, Roffset, Otos_l);
  __ push(ltos);
  if (!is_static && rc == may_rewrite) {
    patch_bytecode(Bytecodes::_fast_lgetfield, G3_scratch, G4_scratch);
  }
  __ ba(checkVolatile);
  __ delayed()->tst(Lscratch);

  __ bind(notLong);

  // cmp(Rflags, btos);
  __ br(Assembler::notEqual, false, Assembler::pt, notByte);
  __ delayed()->cmp(Rflags, ztos);

  // btos
  __ ldsb(Rclass, Roffset, Otos_i);
  __ push(itos);
  if (!is_static && rc == may_rewrite) {
    patch_bytecode(Bytecodes::_fast_bgetfield, G3_scratch, G4_scratch);
  }
  __ ba(checkVolatile);
  __ delayed()->tst(Lscratch);

  __ bind(notByte);

  // cmp(Rflags, ztos);
  __ br(Assembler::notEqual, false, Assembler::pt, notBool);
  __ delayed()->cmp(Rflags, ctos);

  // ztos
  __ ldsb(Rclass, Roffset, Otos_i);
  __ push(itos);
  if (!is_static && rc == may_rewrite) {
    // use btos rewriting, no truncating to t/f bit is needed for getfield.
    patch_bytecode(Bytecodes::_fast_bgetfield, G3_scratch, G4_scratch);
  }
  __ ba(checkVolatile);
  __ delayed()->tst(Lscratch);

  __ bind(notBool);

  // cmp(Rflags, ctos);
  __ br(Assembler::notEqual, false, Assembler::pt, notChar);
  __ delayed()->cmp(Rflags, stos);

  // ctos
  __ lduh(Rclass, Roffset, Otos_i);
  __ push(itos);
  if (!is_static && rc == may_rewrite) {
    patch_bytecode(Bytecodes::_fast_cgetfield, G3_scratch, G4_scratch);
  }
  __ ba(checkVolatile);
  __ delayed()->tst(Lscratch);

  __ bind(notChar);

  // cmp(Rflags, stos);
  __ br(Assembler::notEqual, false, Assembler::pt, notShort);
  __ delayed()->cmp(Rflags, ftos);

  // stos
  __ ldsh(Rclass, Roffset, Otos_i);
  __ push(itos);
  if (!is_static && rc == may_rewrite) {
    patch_bytecode(Bytecodes::_fast_sgetfield, G3_scratch, G4_scratch);
  }
  __ ba(checkVolatile);
  __ delayed()->tst(Lscratch);

  __ bind(notShort);


  // cmp(Rflags, ftos);
  __ br(Assembler::notEqual, false, Assembler::pt, notFloat);
  __ delayed()->tst(Lscratch);

  // ftos
  __ ldf(FloatRegisterImpl::S, Rclass, Roffset, Ftos_f);
  __ push(ftos);
  if (!is_static && rc == may_rewrite) {
    patch_bytecode(Bytecodes::_fast_fgetfield, G3_scratch, G4_scratch);
  }
  __ ba(checkVolatile);
  __ delayed()->tst(Lscratch);

  __ bind(notFloat);


  // dtos
  __ ldf(FloatRegisterImpl::D, Rclass, Roffset, Ftos_d);
  __ push(dtos);
  if (!is_static && rc == may_rewrite) {
    patch_bytecode(Bytecodes::_fast_dgetfield, G3_scratch, G4_scratch);
  }

  __ bind(checkVolatile);
  if (__ membar_has_effect(membar_bits)) {
    // __ tst(Lscratch); executed in delay slot
    __ br(Assembler::zero, false, Assembler::pt, exit);
    __ delayed()->nop();
    volatile_barrier(membar_bits);
  }

  __ bind(exit);
}

void TemplateTable::getfield(int byte_no) {
  getfield_or_static(byte_no, false);
}

void TemplateTable::nofast_getfield(int byte_no) {
  getfield_or_static(byte_no, false, may_not_rewrite);
}

void TemplateTable::getstatic(int byte_no) {
  getfield_or_static(byte_no, true);
}

void TemplateTable::fast_accessfield(TosState state) {
  transition(atos, state);
  Register Rcache  = G3_scratch;
  Register index   = G4_scratch;
  Register Roffset = G4_scratch;
  Register Rflags  = Rcache;
  ByteSize cp_base_offset = ConstantPoolCache::base_offset();

  __ get_cache_and_index_at_bcp(Rcache, index, 1);
  jvmti_post_field_access(Rcache, index, /*is_static*/false, /*has_tos*/true);

  __ ld_ptr(Rcache, cp_base_offset + ConstantPoolCacheEntry::f2_offset(), Roffset);

  __ null_check(Otos_i);
  __ verify_oop(Otos_i);

  Label exit;

  Assembler::Membar_mask_bits membar_bits =
    Assembler::Membar_mask_bits(Assembler::LoadLoad | Assembler::LoadStore);
  if (__ membar_has_effect(membar_bits)) {
    // Get volatile flag
    __ ld_ptr(Rcache, cp_base_offset + ConstantPoolCacheEntry::flags_offset(), Rflags);
    __ set((1 << ConstantPoolCacheEntry::is_volatile_shift), Lscratch);
  }

  switch (bytecode()) {
    case Bytecodes::_fast_bgetfield:
      __ ldsb(Otos_i, Roffset, Otos_i);
      break;
    case Bytecodes::_fast_cgetfield:
      __ lduh(Otos_i, Roffset, Otos_i);
      break;
    case Bytecodes::_fast_sgetfield:
      __ ldsh(Otos_i, Roffset, Otos_i);
      break;
    case Bytecodes::_fast_igetfield:
      __ ld(Otos_i, Roffset, Otos_i);
      break;
    case Bytecodes::_fast_lgetfield:
      __ ld_long(Otos_i, Roffset, Otos_l);
      break;
    case Bytecodes::_fast_fgetfield:
      __ ldf(FloatRegisterImpl::S, Otos_i, Roffset, Ftos_f);
      break;
    case Bytecodes::_fast_dgetfield:
      __ ldf(FloatRegisterImpl::D, Otos_i, Roffset, Ftos_d);
      break;
    case Bytecodes::_fast_agetfield:
      __ load_heap_oop(Otos_i, Roffset, Otos_i);
      break;
    default:
      ShouldNotReachHere();
  }

  if (__ membar_has_effect(membar_bits)) {
    __ btst(Lscratch, Rflags);
    __ br(Assembler::zero, false, Assembler::pt, exit);
    __ delayed()->nop();
    volatile_barrier(membar_bits);
    __ bind(exit);
  }

  if (state == atos) {
    __ verify_oop(Otos_i);    // does not blow flags!
  }
}

void TemplateTable::jvmti_post_fast_field_mod() {
  if (JvmtiExport::can_post_field_modification()) {
    // Check to see if a field modification watch has been set before we take
    // the time to call into the VM.
    Label done;
    AddressLiteral get_field_modification_count_addr(JvmtiExport::get_field_modification_count_addr());
    __ load_contents(get_field_modification_count_addr, G4_scratch);
    __ cmp_and_br_short(G4_scratch, 0, Assembler::equal, Assembler::pt, done);
    __ pop_ptr(G4_scratch);     // copy the object pointer from tos
    __ verify_oop(G4_scratch);
    __ push_ptr(G4_scratch);    // put the object pointer back on tos
    __ get_cache_entry_pointer_at_bcp(G1_scratch, G3_scratch, 1);
    // Save tos values before call_VM() clobbers them. Since we have
    // to do it for every data type, we use the saved values as the
    // jvalue object.
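    // (Background: the VM side reads these saved slots through a JNI
    // jvalue union -- jboolean z; jbyte b; ...; jlong j; jdouble d --
    // which is why each value is pushed with its natural width and, for
    // long/double, with the words in the order the union expects.)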
    switch (bytecode()) {       // save tos values before call_VM() clobbers them
    case Bytecodes::_fast_aputfield: __ push_ptr(Otos_i); break;
    case Bytecodes::_fast_bputfield: // fall through
    case Bytecodes::_fast_zputfield: // fall through
    case Bytecodes::_fast_sputfield: // fall through
    case Bytecodes::_fast_cputfield: // fall through
    case Bytecodes::_fast_iputfield: __ push_i(Otos_i); break;
    case Bytecodes::_fast_dputfield: __ push_d(Ftos_d); break;
    case Bytecodes::_fast_fputfield: __ push_f(Ftos_f); break;
    // get words in right order for use as jvalue object
    case Bytecodes::_fast_lputfield: __ push_l(Otos_l); break;
    default: break;
    }
    // setup pointer to jvalue object
    __ mov(Lesp, G3_scratch);  __ inc(G3_scratch, wordSize);
    // G4_scratch:  object pointer
    // G1_scratch:  cache entry pointer
    // G3_scratch:  jvalue object on the stack
    __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_field_modification), G4_scratch, G1_scratch, G3_scratch);
    switch (bytecode()) {       // restore tos values
    case Bytecodes::_fast_aputfield: __ pop_ptr(Otos_i); break;
    case Bytecodes::_fast_bputfield: // fall through
    case Bytecodes::_fast_zputfield: // fall through
    case Bytecodes::_fast_sputfield: // fall through
    case Bytecodes::_fast_cputfield: // fall through
    case Bytecodes::_fast_iputfield: __ pop_i(Otos_i); break;
    case Bytecodes::_fast_dputfield: __ pop_d(Ftos_d); break;
    case Bytecodes::_fast_fputfield: __ pop_f(Ftos_f); break;
    case Bytecodes::_fast_lputfield: __ pop_l(Otos_l); break;
    default: break;
    }
    __ bind(done);
  }
}

// The registers Rcache and index are expected to be set before the call.
// The function may destroy various registers, just not the Rcache and index registers.
void TemplateTable::jvmti_post_field_mod(Register Rcache, Register index, bool is_static) {
  ByteSize cp_base_offset = ConstantPoolCache::base_offset();

  if (JvmtiExport::can_post_field_modification()) {
    // Check to see if a field modification watch has been set before we take
    // the time to call into the VM.
    Label Label1;
    assert_different_registers(Rcache, index, G1_scratch);
    AddressLiteral get_field_modification_count_addr(JvmtiExport::get_field_modification_count_addr());
    __ load_contents(get_field_modification_count_addr, G1_scratch);
    __ cmp_and_br_short(G1_scratch, 0, Assembler::zero, Assembler::pt, Label1);

    // The Rcache and index registers have already been set.
    // This call could therefore be eliminated, but then the Rcache
    // and index registers would need to be used consistently below.
    __ get_cache_and_index_at_bcp(G1_scratch, G4_scratch, 1);

    __ add(G1_scratch, in_bytes(cp_base_offset), G3_scratch);
    if (is_static) {
      // Life is simple.  Null out the object pointer.
      __ clr(G4_scratch);
    } else {
      Register Rflags = G1_scratch;
      // Life is harder.  The stack holds the value on top, followed by the
      // object.  We don't know the size of the value, though; it could be
      // one or two words depending on its type. As a result, we must find
      // the type to determine where the object is.
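      // Expression stack layout to distinguish (Lesp grows downward):
      //   one-word value:  [value]    [object]  -> object at expr_offset_in_bytes(1)
      //   two-word value:  [value:2]  [object]  -> object at expr_offset_in_bytes(2)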
      Label two_word, valsizeknown;
      __ ld_ptr(G1_scratch, cp_base_offset + ConstantPoolCacheEntry::flags_offset(), Rflags);
      __ mov(Lesp, G4_scratch);
      __ srl(Rflags, ConstantPoolCacheEntry::tos_state_shift, Rflags);
      // Make sure we don't need to mask Rflags after the above shift
      ConstantPoolCacheEntry::verify_tos_state_shift();
      __ cmp(Rflags, ltos);
      __ br(Assembler::equal, false, Assembler::pt, two_word);
      __ delayed()->cmp(Rflags, dtos);
      __ br(Assembler::equal, false, Assembler::pt, two_word);
      __ delayed()->nop();
      __ inc(G4_scratch, Interpreter::expr_offset_in_bytes(1));
      __ ba_short(valsizeknown);
      __ bind(two_word);

      __ inc(G4_scratch, Interpreter::expr_offset_in_bytes(2));

      __ bind(valsizeknown);
      // setup object pointer
      __ ld_ptr(G4_scratch, 0, G4_scratch);
      __ verify_oop(G4_scratch);
    }
    // setup pointer to jvalue object
    __ mov(Lesp, G1_scratch);  __ inc(G1_scratch, wordSize);
    // G4_scratch:  object pointer or NULL if static
    // G3_scratch:  cache entry pointer
    // G1_scratch:  jvalue object on the stack
    __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_field_modification),
               G4_scratch, G3_scratch, G1_scratch);
    __ get_cache_and_index_at_bcp(Rcache, index, 1);
    __ bind(Label1);
  }
}

void TemplateTable::pop_and_check_object(Register r) {
  __ pop_ptr(r);
  __ null_check(r);  // for field access must check obj.
  __ verify_oop(r);
}

void TemplateTable::putfield_or_static(int byte_no, bool is_static, RewriteControl rc) {
  transition(vtos, vtos);
  Register Rcache  = G3_scratch;
  Register index   = G4_scratch;
  Register Rclass  = Rcache;
  Register Roffset = G4_scratch;
  Register Rflags  = G1_scratch;
  ByteSize cp_base_offset = ConstantPoolCache::base_offset();

  resolve_cache_and_index(byte_no, Rcache, index, sizeof(u2));
  jvmti_post_field_mod(Rcache, index, is_static);
  load_field_cp_cache_entry(Rclass, Rcache, index, Roffset, Rflags, is_static);

  Assembler::Membar_mask_bits read_bits =
    Assembler::Membar_mask_bits(Assembler::LoadStore | Assembler::StoreStore);
  Assembler::Membar_mask_bits write_bits = Assembler::StoreLoad;

  Label notVolatile, checkVolatile, exit;
  if (__ membar_has_effect(read_bits) || __ membar_has_effect(write_bits)) {
    __ set((1 << ConstantPoolCacheEntry::is_volatile_shift), Lscratch);
    __ and3(Rflags, Lscratch, Lscratch);

    if (__ membar_has_effect(read_bits)) {
      __ cmp_and_br_short(Lscratch, 0, Assembler::equal, Assembler::pt, notVolatile);
      volatile_barrier(read_bits);
      __ bind(notVolatile);
    }
  }

  __ srl(Rflags, ConstantPoolCacheEntry::tos_state_shift, Rflags);
  // Make sure we don't need to mask Rflags after the above shift
  ConstantPoolCacheEntry::verify_tos_state_shift();

  // compute field type
  Label notInt, notShort, notChar, notObj, notByte, notBool, notLong, notFloat;

  if (is_static) {
    // putstatic with object type most likely, check that first
    __ cmp(Rflags, atos);
    __ br(Assembler::notEqual, false, Assembler::pt, notObj);
    __ delayed()->cmp(Rflags, itos);

    // atos
    {
      __ pop_ptr();
      __ verify_oop(Otos_i);
      do_oop_store(_masm, Rclass, Roffset, 0, Otos_i, G1_scratch, _bs->kind(), false);
      __ ba(checkVolatile);
      __ delayed()->tst(Lscratch);
    }

    __ bind(notObj);
    // cmp(Rflags, itos);
    __ br(Assembler::notEqual, false, Assembler::pt, notInt);
    __ delayed()->cmp(Rflags, btos);

    // itos
    {
      __ pop_i();
      __ st(Otos_i, Rclass, Roffset);
      __ ba(checkVolatile);
      __ delayed()->tst(Lscratch);
    }

    __ bind(notInt);
  } else {
    // putfield with int type most likely, check that first
    __ cmp(Rflags, itos);
    __ br(Assembler::notEqual, false, Assembler::pt, notInt);
    __ delayed()->cmp(Rflags, atos);

    // itos
    {
      __ pop_i();
      pop_and_check_object(Rclass);
      __ st(Otos_i, Rclass, Roffset);
      if (rc == may_rewrite) patch_bytecode(Bytecodes::_fast_iputfield, G3_scratch, G4_scratch, true, byte_no);
      __ ba(checkVolatile);
      __ delayed()->tst(Lscratch);
    }

    __ bind(notInt);
    // cmp(Rflags, atos);
    __ br(Assembler::notEqual, false, Assembler::pt, notObj);
    __ delayed()->cmp(Rflags, btos);

    // atos
    {
      __ pop_ptr();
      pop_and_check_object(Rclass);
      __ verify_oop(Otos_i);
      do_oop_store(_masm, Rclass, Roffset, 0, Otos_i, G1_scratch, _bs->kind(), false);
      if (rc == may_rewrite) patch_bytecode(Bytecodes::_fast_aputfield, G3_scratch, G4_scratch, true, byte_no);
      __ ba(checkVolatile);
      __ delayed()->tst(Lscratch);
    }

    __ bind(notObj);
  }

  // cmp(Rflags, btos);
  __ br(Assembler::notEqual, false, Assembler::pt, notByte);
  __ delayed()->cmp(Rflags, ztos);

  // btos
  {
    __ pop_i();
    if (!is_static) pop_and_check_object(Rclass);
    __ stb(Otos_i, Rclass, Roffset);
    if (!is_static && rc == may_rewrite) {
      patch_bytecode(Bytecodes::_fast_bputfield, G3_scratch, G4_scratch, true, byte_no);
    }
    __ ba(checkVolatile);
    __ delayed()->tst(Lscratch);
  }

  __ bind(notByte);

  // cmp(Rflags, ztos);
  __ br(Assembler::notEqual, false, Assembler::pt, notBool);
  __ delayed()->cmp(Rflags, ltos);

  // ztos
  {
    __ pop_i();
    if (!is_static) pop_and_check_object(Rclass);
    __ and3(Otos_i, 1, Otos_i);
    __ stb(Otos_i, Rclass, Roffset);
    if (!is_static && rc == may_rewrite) {
      patch_bytecode(Bytecodes::_fast_zputfield, G3_scratch, G4_scratch, true, byte_no);
    }
    __ ba(checkVolatile);
    __ delayed()->tst(Lscratch);
  }

  __ bind(notBool);
  // cmp(Rflags, ltos);
  __ br(Assembler::notEqual, false, Assembler::pt, notLong);
  __ delayed()->cmp(Rflags, ctos);

  // ltos
  {
    __ pop_l();
    if (!is_static) pop_and_check_object(Rclass);
    __ st_long(Otos_l, Rclass, Roffset);
    if (!is_static && rc == may_rewrite) {
      patch_bytecode(Bytecodes::_fast_lputfield, G3_scratch, G4_scratch, true, byte_no);
    }
    __ ba(checkVolatile);
    __ delayed()->tst(Lscratch);
  }

  __ bind(notLong);
  // cmp(Rflags, ctos);
  __ br(Assembler::notEqual, false, Assembler::pt, notChar);
  __ delayed()->cmp(Rflags, stos);

  // ctos (char)
  {
    __ pop_i();
    if (!is_static) pop_and_check_object(Rclass);
    __ sth(Otos_i, Rclass, Roffset);
    if (!is_static && rc == may_rewrite) {
      patch_bytecode(Bytecodes::_fast_cputfield, G3_scratch, G4_scratch, true, byte_no);
    }
    __ ba(checkVolatile);
    __ delayed()->tst(Lscratch);
  }

  __ bind(notChar);
  // cmp(Rflags, stos);
  __ br(Assembler::notEqual, false, Assembler::pt, notShort);
  __ delayed()->cmp(Rflags, ftos);

  // stos (short)
  {
    __ pop_i();
    if (!is_static) pop_and_check_object(Rclass);
    __ sth(Otos_i, Rclass, Roffset);
    if (!is_static && rc == may_rewrite) {
      patch_bytecode(Bytecodes::_fast_sputfield, G3_scratch, G4_scratch, true, byte_no);
    }
    __ ba(checkVolatile);
    __ delayed()->tst(Lscratch);
  }

  __ bind(notShort);
  // cmp(Rflags, ftos);
  __ br(Assembler::notZero, false, Assembler::pt, notFloat);
  __ delayed()->nop();

  // ftos
  {
    __ pop_f();
    if (!is_static) pop_and_check_object(Rclass);
    __ stf(FloatRegisterImpl::S, Ftos_f, Rclass, Roffset);
    if (!is_static && rc == may_rewrite) {
      patch_bytecode(Bytecodes::_fast_fputfield, G3_scratch, G4_scratch, true, byte_no);
    }
    __ ba(checkVolatile);
    __ delayed()->tst(Lscratch);
  }

  __ bind(notFloat);

  // dtos
  {
    __ pop_d();
    if (!is_static) pop_and_check_object(Rclass);
    __ stf(FloatRegisterImpl::D, Ftos_d, Rclass, Roffset);
    if (!is_static && rc == may_rewrite) {
      patch_bytecode(Bytecodes::_fast_dputfield, G3_scratch, G4_scratch, true, byte_no);
    }
  }

  __ bind(checkVolatile);
  __ tst(Lscratch);

  if (__ membar_has_effect(write_bits)) {
    // __ tst(Lscratch); in delay slot
    __ br(Assembler::zero, false, Assembler::pt, exit);
    __ delayed()->nop();
    volatile_barrier(Assembler::StoreLoad);
    __ bind(exit);
  }
}

void TemplateTable::fast_storefield(TosState state) {
  transition(state, vtos);
  Register Rcache  = G3_scratch;
  Register Rclass  = Rcache;
  Register Roffset = G4_scratch;
  Register Rflags  = G1_scratch;
  ByteSize cp_base_offset = ConstantPoolCache::base_offset();

  jvmti_post_fast_field_mod();

  __ get_cache_and_index_at_bcp(Rcache, G4_scratch, 1);

  Assembler::Membar_mask_bits read_bits =
    Assembler::Membar_mask_bits(Assembler::LoadStore | Assembler::StoreStore);
  Assembler::Membar_mask_bits write_bits = Assembler::StoreLoad;

  Label notVolatile, checkVolatile, exit;
  if (__ membar_has_effect(read_bits) || __ membar_has_effect(write_bits)) {
    __ ld_ptr(Rcache, cp_base_offset + ConstantPoolCacheEntry::flags_offset(), Rflags);
    __ set((1 << ConstantPoolCacheEntry::is_volatile_shift), Lscratch);
    __ and3(Rflags, Lscratch, Lscratch);
    if (__ membar_has_effect(read_bits)) {
      __ cmp_and_br_short(Lscratch, 0, Assembler::equal, Assembler::pt, notVolatile);
      volatile_barrier(read_bits);
      __ bind(notVolatile);
    }
  }

  __ ld_ptr(Rcache, cp_base_offset + ConstantPoolCacheEntry::f2_offset(), Roffset);
  pop_and_check_object(Rclass);

  switch (bytecode()) {
    case Bytecodes::_fast_zputfield: __ and3(Otos_i, 1, Otos_i);  // fall through to bputfield
    case Bytecodes::_fast_bputfield: __ stb(Otos_i, Rclass, Roffset); break;
    case Bytecodes::_fast_cputfield: /* fall through */
    case Bytecodes::_fast_sputfield: __ sth(Otos_i, Rclass, Roffset); break;
    case Bytecodes::_fast_iputfield: __ st(Otos_i, Rclass, Roffset); break;
    case Bytecodes::_fast_lputfield: __ st_long(Otos_l, Rclass, Roffset); break;
    case Bytecodes::_fast_fputfield:
      __ stf(FloatRegisterImpl::S, Ftos_f, Rclass, Roffset);
      break;
    case Bytecodes::_fast_dputfield:
      __ stf(FloatRegisterImpl::D, Ftos_d, Rclass, Roffset);
      break;
    case Bytecodes::_fast_aputfield:
      do_oop_store(_masm, Rclass, Roffset, 0, Otos_i, G1_scratch, _bs->kind(), false);
      break;
    default:
      ShouldNotReachHere();
  }

  if (__ membar_has_effect(write_bits)) {
    __ cmp_and_br_short(Lscratch, 0, Assembler::equal, Assembler::pt, exit);
    volatile_barrier(Assembler::StoreLoad);
    __ bind(exit);
  }
}

void TemplateTable::putfield(int byte_no) {
  putfield_or_static(byte_no, false);
}

void TemplateTable::nofast_putfield(int byte_no) {
  putfield_or_static(byte_no, false, may_not_rewrite);
}

void TemplateTable::putstatic(int byte_no) {
  putfield_or_static(byte_no, true);
}

void TemplateTable::fast_xaccess(TosState state) {
  transition(vtos, state);
  Register Rcache = G3_scratch;
  Register Roffset = G4_scratch;
  Register Rflags = G4_scratch;
  Register Rreceiver = Lscratch;

  __ ld_ptr(Llocals, 0, Rreceiver);

  // access constant pool cache (is resolved)
  __ get_cache_and_index_at_bcp(Rcache, G4_scratch, 2);
  __ ld_ptr(Rcache, ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::f2_offset(), Roffset);
  __ add(Lbcp, 1, Lbcp);  // needed to report exception at the correct bcp

  __ verify_oop(Rreceiver);
  __ null_check(Rreceiver);
  if (state == atos) {
    __ load_heap_oop(Rreceiver, Roffset, Otos_i);
  } else if (state == itos) {
    __ ld(Rreceiver, Roffset, Otos_i);
  } else if (state == ftos) {
    __ ldf(FloatRegisterImpl::S, Rreceiver, Roffset, Ftos_f);
  } else {
    ShouldNotReachHere();
  }

  Assembler::Membar_mask_bits membar_bits =
    Assembler::Membar_mask_bits(Assembler::LoadLoad | Assembler::LoadStore);
  if (__ membar_has_effect(membar_bits)) {

    // Get is_volatile value in Rflags and check if membar is needed
    __ ld_ptr(Rcache, ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::flags_offset(), Rflags);

    // Test volatile
    Label notVolatile;
    __ set((1 << ConstantPoolCacheEntry::is_volatile_shift), Lscratch);
    __ btst(Rflags, Lscratch);
    __ br(Assembler::zero, false, Assembler::pt, notVolatile);
    __ delayed()->nop();
    volatile_barrier(membar_bits);
    __ bind(notVolatile);
  }

  __ interp_verify_oop(Otos_i, state, __FILE__, __LINE__);
  __ sub(Lbcp, 1, Lbcp);
}

//----------------------------------------------------------------------------------------------------
// Calls

void TemplateTable::count_calls(Register method, Register temp) {
  // implemented elsewhere
  ShouldNotReachHere();
}

void TemplateTable::prepare_invoke(int byte_no,
                                   Register method,  // linked method (or i-klass)
                                   Register ra,      // return address
                                   Register index,   // itable index, MethodType, etc.
                                   Register recv,    // if caller wants to see it
                                   Register flags    // if caller wants to test it
                                   ) {
  // determine flags
  const Bytecodes::Code code = bytecode();
  const bool is_invokeinterface = code == Bytecodes::_invokeinterface;
  const bool is_invokedynamic   = code == Bytecodes::_invokedynamic;
  const bool is_invokehandle    = code == Bytecodes::_invokehandle;
  const bool is_invokevirtual   = code == Bytecodes::_invokevirtual;
  const bool is_invokespecial   = code == Bytecodes::_invokespecial;
  const bool load_receiver      = (recv != noreg);
  assert(load_receiver == (code != Bytecodes::_invokestatic && code != Bytecodes::_invokedynamic), "");
  assert(recv  == noreg || recv  == O0, "");
  assert(flags == noreg || flags == O1, "");

  // setup registers & access constant pool cache
  if (recv  == noreg)  recv  = O0;
  if (flags == noreg)  flags = O1;
  const Register temp = O2;
  assert_different_registers(method, ra, index, recv, flags, temp);

  load_invoke_cp_cache_entry(byte_no, method, index, flags, is_invokevirtual, false, is_invokedynamic);

  __ mov(SP, O5_savedSP);  // record SP that we wanted the callee to restore

  // maybe push appendix to arguments
  if (is_invokedynamic || is_invokehandle) {
    Label L_no_push;
    __ set((1 << ConstantPoolCacheEntry::has_appendix_shift), temp);
    __ btst(flags, temp);
    __ br(Assembler::zero, false, Assembler::pt, L_no_push);
    __ delayed()->nop();
    // Push the appendix as a trailing parameter.
    // This must be done before we get the receiver,
    // since the parameter_size includes it.
    assert(ConstantPoolCacheEntry::_indy_resolved_references_appendix_offset == 0, "appendix expected at index+0");
    __ load_resolved_reference_at_index(temp, index);
    __ verify_oop(temp);
    __ push_ptr(temp);  // push appendix (MethodType, CallSite, etc.)
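    // (The appendix is the resolved reference -- a MethodType for
    // invokehandle, a CallSite for invokedynamic -- passed as a hidden
    // trailing argument, which is why parameter_size already includes it.)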
    __ bind(L_no_push);
  }

  // load receiver if needed (after appendix is pushed so parameter size is correct)
  if (load_receiver) {
    __ and3(flags, ConstantPoolCacheEntry::parameter_size_mask, temp);  // get parameter size
    __ load_receiver(temp, recv);  // __ argument_address uses Gargs but we need Lesp
    __ verify_oop(recv);
  }

  // compute return type
  __ srl(flags, ConstantPoolCacheEntry::tos_state_shift, ra);
  // Make sure we don't need to mask flags after the above shift
  ConstantPoolCacheEntry::verify_tos_state_shift();
  // load return address
  {
    const address table_addr = (address) Interpreter::invoke_return_entry_table_for(code);
    AddressLiteral table(table_addr);
    __ set(table, temp);
    __ sll(ra, LogBytesPerWord, ra);
    __ ld_ptr(Address(temp, ra), ra);
  }
}


void TemplateTable::generate_vtable_call(Register Rrecv, Register Rindex, Register Rret) {
  Register Rtemp = G4_scratch;
  Register Rcall = Rindex;
  assert_different_registers(Rcall, G5_method, Gargs, Rret);

  // get target Method* & entry point
  __ lookup_virtual_method(Rrecv, Rindex, G5_method);
  __ profile_arguments_type(G5_method, Rcall, Gargs, true);
  __ profile_called_method(G5_method, Rtemp);
  __ call_from_interpreter(Rcall, Gargs, Rret);
}

void TemplateTable::invokevirtual(int byte_no) {
  transition(vtos, vtos);
  assert(byte_no == f2_byte, "use this argument");

  Register Rscratch = G3_scratch;
  Register Rtemp    = G4_scratch;
  Register Rret     = Lscratch;
  Register O0_recv  = O0;
  Label notFinal;

  load_invoke_cp_cache_entry(byte_no, G5_method, noreg, Rret, true, false, false);
  __ mov(SP, O5_savedSP);  // record SP that we wanted the callee to restore

  // Check for vfinal
  __ set((1 << ConstantPoolCacheEntry::is_vfinal_shift), G4_scratch);
  __ btst(Rret, G4_scratch);
  __ br(Assembler::zero, false, Assembler::pt, notFinal);
  __ delayed()->and3(Rret, 0xFF, G4_scratch);  // gets number of parameters

  if (RewriteBytecodes && !UseSharedSpaces && !DumpSharedSpaces) {
    patch_bytecode(Bytecodes::_fast_invokevfinal, Rscratch, Rtemp);
  }

  invokevfinal_helper(Rscratch, Rret);

  __ bind(notFinal);

  __ mov(G5_method, Rscratch);  // better scratch register
  __ load_receiver(G4_scratch, O0_recv);  // gets receiverOop
  // receiver is in O0_recv
  __ verify_oop(O0_recv);

  // get return address
  AddressLiteral table(Interpreter::invoke_return_entry_table());
  __ set(table, Rtemp);
  __ srl(Rret, ConstantPoolCacheEntry::tos_state_shift, Rret);  // get return type
  // Make sure we don't need to mask Rret after the above shift
  ConstantPoolCacheEntry::verify_tos_state_shift();
  __ sll(Rret, LogBytesPerWord, Rret);
  __ ld_ptr(Rtemp, Rret, Rret);  // get return address

  // get receiver klass
  __ null_check(O0_recv, oopDesc::klass_offset_in_bytes());
  __ load_klass(O0_recv, O0_recv);
  __ verify_klass_ptr(O0_recv);

  __ profile_virtual_call(O0_recv, O4);

  generate_vtable_call(O0_recv, Rscratch, Rret);
}

void TemplateTable::fast_invokevfinal(int byte_no) {
  transition(vtos, vtos);
  assert(byte_no == f2_byte, "use this argument");

  load_invoke_cp_cache_entry(byte_no, G5_method, noreg, Lscratch, true,
                             /*is_invokevfinal*/true, false);
  __ mov(SP, O5_savedSP);  // record SP that we wanted the callee to restore
  invokevfinal_helper(G3_scratch, Lscratch);
}

void TemplateTable::invokevfinal_helper(Register Rscratch, Register Rret) {
  Register Rtemp = G4_scratch;

  // Load receiver from stack slot
  __ ld_ptr(G5_method, in_bytes(Method::const_offset()), G4_scratch);
  __ lduh(G4_scratch, in_bytes(ConstMethod::size_of_parameters_offset()), G4_scratch);
  __ load_receiver(G4_scratch, O0);

  // receiver NULL check
  __ null_check(O0);

  __ profile_final_call(O4);
  __ profile_arguments_type(G5_method, Rscratch, Gargs, true);

  // get return address
  AddressLiteral table(Interpreter::invoke_return_entry_table());
  __ set(table, Rtemp);
  __ srl(Rret, ConstantPoolCacheEntry::tos_state_shift, Rret);  // get return type
  // Make sure we don't need to mask Rret after the above shift
  ConstantPoolCacheEntry::verify_tos_state_shift();
  __ sll(Rret, LogBytesPerWord, Rret);
  __ ld_ptr(Rtemp, Rret, Rret);  // get return address


  // do the call
  __ call_from_interpreter(Rscratch, Gargs, Rret);
}


void TemplateTable::invokespecial(int byte_no) {
  transition(vtos, vtos);
  assert(byte_no == f1_byte, "use this argument");

  const Register Rret     = Lscratch;
  const Register O0_recv  = O0;
  const Register Rscratch = G3_scratch;

  prepare_invoke(byte_no, G5_method, Rret, noreg, O0_recv);  // get receiver also for null check
  __ null_check(O0_recv);

  // do the call
  __ profile_call(O4);
  __ profile_arguments_type(G5_method, Rscratch, Gargs, false);
  __ call_from_interpreter(Rscratch, Gargs, Rret);
}


void TemplateTable::invokestatic(int byte_no) {
  transition(vtos, vtos);
  assert(byte_no == f1_byte, "use this argument");

  const Register Rret     = Lscratch;
  const Register Rscratch = G3_scratch;

  prepare_invoke(byte_no, G5_method, Rret);  // get f1 Method*

  // do the call
  __ profile_call(O4);
  __ profile_arguments_type(G5_method, Rscratch, Gargs, false);
  __ call_from_interpreter(Rscratch, Gargs, Rret);
}

void TemplateTable::invokeinterface_object_method(Register RKlass,
                                                  Register Rcall,
                                                  Register Rret,
                                                  Register Rflags) {
  Register Rscratch = G4_scratch;
  Register Rindex   = Lscratch;

  assert_different_registers(Rscratch, Rindex, Rret);

  Label notFinal;

  // Check for vfinal
  __ set((1 << ConstantPoolCacheEntry::is_vfinal_shift), Rscratch);
  __ btst(Rflags, Rscratch);
  __ br(Assembler::zero, false, Assembler::pt, notFinal);
  __ delayed()->nop();

  __ profile_final_call(O4);

  // do the call - the index (f2) contains the Method*
  assert_different_registers(G5_method, Gargs, Rcall);
  __ mov(Rindex, G5_method);
  __ profile_arguments_type(G5_method, Rcall, Gargs, true);
  __ call_from_interpreter(Rcall, Gargs, Rret);
  __ bind(notFinal);

  __ profile_virtual_call(RKlass, O4);
  generate_vtable_call(RKlass, Rindex, Rret);
}


void TemplateTable::invokeinterface(int byte_no) {
  transition(vtos, vtos);
  assert(byte_no == f1_byte, "use this argument");

  const Register Rinterface = G1_scratch;
  const Register Rmethod    = Lscratch;
  const Register Rret       = G3_scratch;
  const Register O0_recv    = O0;
  const Register O1_flags   = O1;
  const Register O2_Klass   = O2;
  const Register Rscratch   = G4_scratch;
  assert_different_registers(Rscratch, G5_method);

  prepare_invoke(byte_no, Rinterface, Rret, Rmethod, O0_recv, O1_flags);

  // get receiver klass
  __ null_check(O0_recv, oopDesc::klass_offset_in_bytes());
  __ load_klass(O0_recv, O2_Klass);

  // Special case of invokeinterface called for virtual method of
  // java.lang.Object.  See cpCacheOop.cpp for details.
  // This code isn't produced by javac, but could be produced by
  // another compliant Java compiler.
  Label notMethod;
  __ set((1 << ConstantPoolCacheEntry::is_forced_virtual_shift), Rscratch);
  __ btst(O1_flags, Rscratch);
  __ br(Assembler::zero, false, Assembler::pt, notMethod);
  __ delayed()->nop();

  invokeinterface_object_method(O2_Klass, Rinterface, Rret, O1_flags);

  __ bind(notMethod);

  Register Rtemp = O1_flags;

  Label L_no_such_interface;

  // Receiver subtype check against REFC.
  __ lookup_interface_method(// inputs: rec. class, interface, itable index
                             O2_Klass, Rinterface, noreg,
                             // outputs: temp reg1, temp reg2, temp reg3
                             G5_method, Rscratch, Rtemp,
                             L_no_such_interface,
                             /*return_method=*/false);

  __ profile_virtual_call(O2_Klass, O4);

  //
  // find entry point to call
  //

  // Get declaring interface class from method
  __ ld_ptr(Rmethod, Method::const_offset(), Rinterface);
  __ ld_ptr(Rinterface, ConstMethod::constants_offset(), Rinterface);
  __ ld_ptr(Rinterface, ConstantPool::pool_holder_offset_in_bytes(), Rinterface);

  // Get itable index from method
  const Register Rindex = G5_method;
  __ ld(Rmethod, Method::itable_index_offset(), Rindex);
  __ sub(Rindex, Method::itable_index_max, Rindex);
  __ neg(Rindex);

  // Preserve O2_Klass for throw_AbstractMethodErrorVerbose
  __ mov(O2_Klass, O4);
  __ lookup_interface_method(// inputs: rec. class, interface, itable index
                             O4, Rinterface, Rindex,
                             // outputs: method, scan temp reg, temp reg
                             G5_method, Rscratch, Rtemp,
                             L_no_such_interface);

  // Check for abstract method error.
  {
    Label ok;
    __ br_notnull_short(G5_method, Assembler::pt, ok);
    // Pass arguments for generating a verbose error message.
    call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_AbstractMethodErrorVerbose),
            O2_Klass, Rmethod);
    __ should_not_reach_here();
    __ bind(ok);
  }

  Register Rcall = Rinterface;
  assert_different_registers(Rcall, G5_method, Gargs, Rret);

  __ profile_arguments_type(G5_method, Rcall, Gargs, true);
  __ profile_called_method(G5_method, Rscratch);
  __ call_from_interpreter(Rcall, Gargs, Rret);

  __ bind(L_no_such_interface);
  // Pass arguments for generating a verbose error message.
  call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_IncompatibleClassChangeErrorVerbose),
          O2_Klass, Rinterface);
  __ should_not_reach_here();
}

void TemplateTable::invokehandle(int byte_no) {
  transition(vtos, vtos);
  assert(byte_no == f1_byte, "use this argument");

  const Register Rret     = Lscratch;
  const Register G4_mtype = G4_scratch;
  const Register O0_recv  = O0;
  const Register Rscratch = G3_scratch;

  prepare_invoke(byte_no, G5_method, Rret, G4_mtype, O0_recv);
  __ null_check(O0_recv);

  // G4: MethodType object (from cpool->resolved_references[f1], if necessary)
  // G5: MH.invokeExact_MT method (from f2)

  // Note:  G4_mtype is already pushed (if necessary) by prepare_invoke

  // do the call
  __ verify_oop(G4_mtype);
  __ profile_final_call(O4);  // FIXME: profile the LambdaForm also
  __ profile_arguments_type(G5_method, Rscratch, Gargs, true);
  __ call_from_interpreter(Rscratch, Gargs, Rret);
}


void TemplateTable::invokedynamic(int byte_no) {
  transition(vtos, vtos);
  assert(byte_no == f1_byte, "use this argument");

  const Register Rret        = Lscratch;
  const Register G4_callsite = G4_scratch;
  const Register Rscratch    = G3_scratch;

  prepare_invoke(byte_no, G5_method, Rret, G4_callsite);

  // G4: CallSite object (from cpool->resolved_references[f1])
  // G5: MH.linkToCallSite method (from f2)

  // Note:  G4_callsite is already pushed by prepare_invoke

  // %%% should make a type profile for any invokedynamic that takes a ref argument
  // profile this call
  __ profile_call(O4);

  // do the call
  __ verify_oop(G4_callsite);
  __ profile_arguments_type(G5_method, Rscratch, Gargs, false);
  __ call_from_interpreter(Rscratch, Gargs, Rret);
}


//----------------------------------------------------------------------------------------------------
// Allocation

void TemplateTable::_new() {
  transition(vtos, atos);

  Label slow_case;
  Label done;
  Label initialize_header;
  Label initialize_object;  // including clearing the fields

  Register RallocatedObject = Otos_i;
  Register RinstanceKlass = O1;
  Register Roffset = O3;
  Register Rscratch = O4;

  __ get_2_byte_integer_at_bcp(1, Rscratch, Roffset, InterpreterMacroAssembler::Unsigned);
  __ get_cpool_and_tags(Rscratch, G3_scratch);
  // make sure the class we're about to instantiate has been resolved
  // This is done before loading InstanceKlass to be consistent with the order
  // in which the constant pool is updated (see ConstantPool::klass_at_put)
  __ add(G3_scratch, Array<u1>::base_offset_in_bytes(), G3_scratch);
  __ ldub(G3_scratch, Roffset, G3_scratch);
  __ cmp(G3_scratch, JVM_CONSTANT_Class);
  __ br(Assembler::notEqual, false, Assembler::pn, slow_case);
  __ delayed()->sll(Roffset, LogBytesPerWord, Roffset);
  // get InstanceKlass
  __ load_resolved_klass_at_offset(Rscratch, Roffset, RinstanceKlass);

  // make sure klass is fully initialized:
  __ ldub(RinstanceKlass, in_bytes(InstanceKlass::init_state_offset()), G3_scratch);
  __ cmp(G3_scratch, InstanceKlass::fully_initialized);
  __ br(Assembler::notEqual, false, Assembler::pn, slow_case);
  __ delayed()->ld(RinstanceKlass, in_bytes(Klass::layout_helper_offset()), Roffset);

  // get instance_size in InstanceKlass (already aligned)
  //__ ld(RinstanceKlass, in_bytes(Klass::layout_helper_offset()), Roffset);

  // make sure the klass does not have a finalizer, and is not abstract,
  // an interface, or java/lang/Class
  __ btst(Klass::_lh_instance_slow_path_bit, Roffset);
  __ br(Assembler::notZero, false, Assembler::pn, slow_case);
  __ delayed()->nop();

  // Allocate the instance:
  //  If TLAB is enabled:
  //    Try to allocate in the TLAB.
  //    If fails, go to the slow path.
  //  Else If inline contiguous allocations are enabled:
  //    Try to allocate in eden.
  //    If fails due to heap end, go to slow path.
  //
  //  If TLAB is enabled OR inline contiguous is enabled:
  //    Initialize the allocation.
  //    Exit.
  //
  //  Go to slow path.

  const bool allow_shared_alloc =
    Universe::heap()->supports_inline_contig_alloc();

  if (UseTLAB) {
    Register RoldTopValue = RallocatedObject;
    Register RtlabWasteLimitValue = G3_scratch;
    Register RnewTopValue = G1_scratch;
    Register RendValue = Rscratch;
    Register RfreeValue = RnewTopValue;

    // check if we can allocate in the TLAB
    __ ld_ptr(G2_thread, in_bytes(JavaThread::tlab_top_offset()), RoldTopValue); // sets up RallocatedObject
    __ ld_ptr(G2_thread, in_bytes(JavaThread::tlab_current_end_offset()), RendValue);
    __ add(RoldTopValue, Roffset, RnewTopValue);

    // if there is enough space, we do not CAS and do not clear
    __ cmp(RnewTopValue, RendValue);
    if (ZeroTLAB) {
      // the fields have already been cleared
      __ brx(Assembler::lessEqualUnsigned, true, Assembler::pt, initialize_header);
    } else {
      // initialize both the header and fields
      __ brx(Assembler::lessEqualUnsigned, true, Assembler::pt, initialize_object);
    }
    __ delayed()->st_ptr(RnewTopValue, G2_thread, in_bytes(JavaThread::tlab_top_offset()));

    // Allocation does not fit in the TLAB.
    __ ba_short(slow_case);
  } else {
    // Allocation in the shared Eden
    if (allow_shared_alloc) {
      Register RoldTopValue = G1_scratch;
      Register RtopAddr = G3_scratch;
      Register RnewTopValue = RallocatedObject;
      Register RendValue = Rscratch;

      __ set((intptr_t)Universe::heap()->top_addr(), RtopAddr);

      Label retry;
      __ bind(retry);
      __ set((intptr_t)Universe::heap()->end_addr(), RendValue);
      __ ld_ptr(RendValue, 0, RendValue);
      __ ld_ptr(RtopAddr, 0, RoldTopValue);
      __ add(RoldTopValue, Roffset, RnewTopValue);

      // RnewTopValue contains the top address after the new object
      // has been allocated.
      __ cmp_and_brx_short(RnewTopValue, RendValue, Assembler::greaterUnsigned, Assembler::pn, slow_case);

      __ cas_ptr(RtopAddr, RoldTopValue, RnewTopValue);

      // if someone beat us on the allocation, try again, otherwise continue
      __ cmp_and_brx_short(RoldTopValue, RnewTopValue, Assembler::notEqual, Assembler::pn, retry);

      // bump total bytes allocated by this thread
      // RoldTopValue and RtopAddr are dead, so can use G1 and G3
      __ incr_allocated_bytes(Roffset, G1_scratch, G3_scratch);
    }
  }

  // If UseTLAB or allow_shared_alloc are true, the object has been created
  // above and still needs to be initialized. Otherwise, skip and go to the
  // slow path.
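  // (Sketch of the initialization for a plain instance:
  //    [mark word | klass word (+ gap if compressed) | instance fields...]
  //  the field words are zeroed first, then the header is written, with the
  //  klass stored last -- see "klass (last for cms)" below.)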
  if (UseTLAB || allow_shared_alloc) {
    // clear object fields
    __ bind(initialize_object);
    __ deccc(Roffset, sizeof(oopDesc));
    __ br(Assembler::zero, false, Assembler::pt, initialize_header);
    __ delayed()->add(RallocatedObject, sizeof(oopDesc), G3_scratch);

    // initialize remaining object fields
    if (UseBlockZeroing) {
      // Use BIS for zeroing
      __ bis_zeroing(G3_scratch, Roffset, G1_scratch, initialize_header);
    } else {
      Label loop;
      __ subcc(Roffset, wordSize, Roffset);
      __ bind(loop);
      //__ subcc(Roffset, wordSize, Roffset);  // executed above loop or in delay slot
      __ st_ptr(G0, G3_scratch, Roffset);
      __ br(Assembler::notEqual, false, Assembler::pt, loop);
      __ delayed()->subcc(Roffset, wordSize, Roffset);
    }
    __ ba_short(initialize_header);
  }

  // slow case
  __ bind(slow_case);
  __ get_2_byte_integer_at_bcp(1, G3_scratch, O2, InterpreterMacroAssembler::Unsigned);
  __ get_constant_pool(O1);

  call_VM(Otos_i, CAST_FROM_FN_PTR(address, InterpreterRuntime::_new), O1, O2);

  __ ba_short(done);

  // Initialize the header: mark, klass
  __ bind(initialize_header);

  if (UseBiasedLocking) {
    __ ld_ptr(RinstanceKlass, in_bytes(Klass::prototype_header_offset()), G4_scratch);
  } else {
    __ set((intptr_t)markOopDesc::prototype(), G4_scratch);
  }
  __ st_ptr(G4_scratch, RallocatedObject, oopDesc::mark_offset_in_bytes());  // mark
  __ store_klass_gap(G0, RallocatedObject);          // klass gap if compressed
  __ store_klass(RinstanceKlass, RallocatedObject);  // klass (last for cms)

  {
    SkipIfEqual skip_if(
      _masm, G4_scratch, &DTraceAllocProbes, Assembler::zero);
    // Trigger dtrace event
    __ push(atos);
    __ call_VM_leaf(noreg,
       CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_object_alloc), O0);
    __ pop(atos);
  }

  // continue
  __ bind(done);
}



void TemplateTable::newarray() {
  transition(itos, atos);
  __ ldub(Lbcp, 1, O1);
  call_VM(Otos_i, CAST_FROM_FN_PTR(address, InterpreterRuntime::newarray), O1, Otos_i);
}


void TemplateTable::anewarray() {
  transition(itos, atos);
  __ get_constant_pool(O1);
  __ get_2_byte_integer_at_bcp(1, G4_scratch, O2, InterpreterMacroAssembler::Unsigned);
  call_VM(Otos_i, CAST_FROM_FN_PTR(address, InterpreterRuntime::anewarray), O1, O2, Otos_i);
}


void TemplateTable::arraylength() {
  transition(atos, itos);
  Label ok;
  __ verify_oop(Otos_i);
  __ tst(Otos_i);
  __ throw_if_not_1_x(Assembler::notZero, ok);
  __ delayed()->ld(Otos_i, arrayOopDesc::length_offset_in_bytes(), Otos_i);
  __ throw_if_not_2(Interpreter::_throw_NullPointerException_entry, G3_scratch, ok);
}


void TemplateTable::checkcast() {
  transition(atos, atos);
  Label done, is_null, quicked, cast_ok, resolved;
  Register Roffset = G1_scratch;
  Register RobjKlass = O5;
  Register RspecifiedKlass = O4;

  // Check for casting a NULL
  __ br_null(Otos_i, false, Assembler::pn, is_null);
  __ delayed()->nop();

  // Get value klass in RobjKlass
  __ load_klass(Otos_i, RobjKlass); // get value klass

  // Get constant pool tag
  __ get_2_byte_integer_at_bcp(1, Lscratch, Roffset, InterpreterMacroAssembler::Unsigned);

  // See if the checkcast has been quickened
  __ get_cpool_and_tags(Lscratch, G3_scratch);
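  // (A checkcast is "quickened" once its class has been resolved: the
  // constant pool tag becomes JVM_CONSTANT_Class, so later executions can
  // skip the quicken_io_cc VM call below.)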
  __ add(G3_scratch, Array<u1>::base_offset_in_bytes(), G3_scratch);
  __ ldub(G3_scratch, Roffset, G3_scratch);
  __ cmp(G3_scratch, JVM_CONSTANT_Class);
  __ br(Assembler::equal, true, Assembler::pt, quicked);
  __ delayed()->sll(Roffset, LogBytesPerWord, Roffset);

  __ push_ptr(); // save receiver for result, and for GC
  call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::quicken_io_cc));
  __ get_vm_result_2(RspecifiedKlass);
  __ pop_ptr(Otos_i, G3_scratch); // restore receiver

  __ ba_short(resolved);

  // Extract target class from constant pool
  __ bind(quicked);
  __ load_resolved_klass_at_offset(Lscratch, Roffset, RspecifiedKlass);

  __ bind(resolved);
  __ load_klass(Otos_i, RobjKlass); // get value klass

  // Generate a fast subtype check. Branch to cast_ok if no
  // failure. Throw an exception on failure.
  __ gen_subtype_check(RobjKlass, RspecifiedKlass, G3_scratch, G4_scratch, G1_scratch, cast_ok);

  // Not a subtype; so must throw exception
  __ throw_if_not_x(Assembler::never, Interpreter::_throw_ClassCastException_entry, G3_scratch);

  __ bind(cast_ok);

  if (ProfileInterpreter) {
    __ ba_short(done);
  }
  __ bind(is_null);
  __ profile_null_seen(G3_scratch);
  __ bind(done);
}


void TemplateTable::instanceof() {
  Label done, is_null, quicked, resolved;
  transition(atos, itos);
  Register Roffset = G1_scratch;
  Register RobjKlass = O5;
  Register RspecifiedKlass = O4;

  // Check for a NULL operand
  __ br_null(Otos_i, false, Assembler::pt, is_null);
  __ delayed()->nop();

  // Get value klass in RobjKlass
  __ load_klass(Otos_i, RobjKlass); // get value klass

  // Get constant pool tag
  __ get_2_byte_integer_at_bcp(1, Lscratch, Roffset, InterpreterMacroAssembler::Unsigned);

  // See if the instanceof has been quickened
  __ get_cpool_and_tags(Lscratch, G3_scratch);
  __ add(G3_scratch, Array<u1>::base_offset_in_bytes(), G3_scratch);
  __ ldub(G3_scratch, Roffset, G3_scratch);
  __ cmp(G3_scratch, JVM_CONSTANT_Class);
  __ br(Assembler::equal, true, Assembler::pt, quicked);
  __ delayed()->sll(Roffset, LogBytesPerWord, Roffset);

  __ push_ptr(); // save receiver for result, and for GC
  call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::quicken_io_cc));
  __ get_vm_result_2(RspecifiedKlass);
  __ pop_ptr(Otos_i, G3_scratch); // restore receiver

  __ ba_short(resolved);

  // Extract target class from constant pool
  __ bind(quicked);
  __ get_constant_pool(Lscratch);
  __ load_resolved_klass_at_offset(Lscratch, Roffset, RspecifiedKlass);

  __ bind(resolved);
  __ load_klass(Otos_i, RobjKlass); // get value klass

  // Generate a fast subtype check. Branch to done if no
  // failure. Return 0 on failure.
  __ or3(G0, 1, Otos_i); // set result assuming quick tests succeed
  __ gen_subtype_check(RobjKlass, RspecifiedKlass, G3_scratch, G4_scratch, G1_scratch, done);
  // Not a subtype; return 0;
  __ clr(Otos_i);

  if (ProfileInterpreter) {
    __ ba_short(done);
  }
  __ bind(is_null);
  __ profile_null_seen(G3_scratch);
  __ bind(done);
}

void TemplateTable::_breakpoint() {

  // Note: We get here even if we are single stepping.
  // jbug insists on setting breakpoints at every bytecode
  // even if we are in single step mode.

  transition(vtos, vtos);
  // get the unpatched byte code
  __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::get_original_bytecode_at), Lmethod, Lbcp);
  __ mov(O0, Lbyte_code);

  // post the breakpoint event
  __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::_breakpoint), Lmethod, Lbcp);

  // complete the execution of original bytecode
  __ dispatch_normal(vtos);
}


//----------------------------------------------------------------------------------------------------
// Exceptions

void TemplateTable::athrow() {
  transition(atos, vtos);

  // This works because the exception is cached in Otos_i, which is the same
  // register as O0, which is what throw_exception_entry expects.
  assert(Otos_i == Oexception, "see explanation above");

  __ verify_oop(Otos_i);
  __ null_check(Otos_i);
  __ throw_if_not_x(Assembler::never, Interpreter::throw_exception_entry(), G3_scratch);
}


//----------------------------------------------------------------------------------------------------
// Synchronization


// See frame_sparc.hpp for monitor block layout.
// Monitor elements are dynamically allocated by growing the stack as needed.

void TemplateTable::monitorenter() {
  transition(atos, vtos);
  __ verify_oop(Otos_i);
  // Try to acquire a lock on the object.
  // Repeat until successful (i.e., until
  // monitorenter returns true).

  { Label ok;
    __ tst(Otos_i);
    __ throw_if_not_1_x(Assembler::notZero, ok);
    __ delayed()->mov(Otos_i, Lscratch); // save obj
    __ throw_if_not_2(Interpreter::_throw_NullPointerException_entry, G3_scratch, ok);
  }

  assert(O0 == Otos_i, "Be sure where the object to lock is");

  // find a free slot in the monitor block

  // initialize entry pointer
  __ clr(O1); // points to free slot or NULL

  {
    Label entry, loop, exit;
    __ add(__ top_most_monitor(), O2); // last one to check
    __ ba(entry);
    __ delayed()->mov(Lmonitors, O3);  // first one to check

    __ bind(loop);

    __ verify_oop(O4); // verify each monitor's oop
    __ tst(O4);        // is this entry unused?
    __ movcc(Assembler::zero, false, Assembler::ptr_cc, O3, O1);

    __ cmp(O4, O0); // check if current entry is for same object
    __ brx(Assembler::equal, false, Assembler::pn, exit);
    __ delayed()->inc(O3, frame::interpreter_frame_monitor_size() * wordSize); // check next one

    __ bind(entry);

    __ cmp(O3, O2);
    __ brx(Assembler::lessEqualUnsigned, true, Assembler::pt, loop);
    __ delayed()->ld_ptr(O3, BasicObjectLock::obj_offset_in_bytes(), O4);

    __ bind(exit);
  }

  { Label allocated;

    // found free slot?
    __ br_notnull_short(O1, Assembler::pn, allocated);

    __ add_monitor_to_stack(false, O2, O3);
    __ mov(Lmonitors, O1);

    __ bind(allocated);
  }
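  // The slot search above, as a hedged C++-style sketch (illustrative names;
  // the real layout comes from BasicObjectLock and frame_sparc.hpp):
  //
  //   BasicObjectLock* free = NULL;
  //   for (BasicObjectLock* m = monitors; m <= top_most_monitor; m++) {
  //     if (m->obj() == NULL) free = m;         // remember an unused slot
  //     if (m->obj() == obj)  break;            // recursive enter: stop early
  //   }
  //   if (free == NULL) free = add_monitor_to_stack();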
  // Increment bcp to point to the next bytecode, so exception handling for
  // asynchronous exceptions works correctly.
  // The object has already been popped from the stack, so the expression
  // stack looks correct.
  __ inc(Lbcp);

  __ st_ptr(O0, O1, BasicObjectLock::obj_offset_in_bytes()); // store object
  __ lock_object(O1, O0);

  // check if there's enough space on the stack for the monitors after locking
  __ generate_stack_overflow_check(0);

  // The bcp has already been incremented. Just need to dispatch to next instruction.
  __ dispatch_next(vtos);
}


void TemplateTable::monitorexit() {
  transition(atos, vtos);
  __ verify_oop(Otos_i);
  __ tst(Otos_i);
  __ throw_if_not_x(Assembler::notZero, Interpreter::_throw_NullPointerException_entry, G3_scratch);

  assert(O0 == Otos_i, "just checking");

  { Label entry, loop, found;
    __ add(__ top_most_monitor(), O2); // last one to check
    __ ba(entry);
    // Use Lscratch to hold the monitor element to check; start with the most
    // recent monitor. By using a local register it survives the call to the C routine.
    __ delayed()->mov(Lmonitors, Lscratch);

    __ bind(loop);

    __ verify_oop(O4); // verify each monitor's oop
    __ cmp(O4, O0);    // check if current entry is for desired object
    __ brx(Assembler::equal, true, Assembler::pt, found);
    __ delayed()->mov(Lscratch, O1); // pass found entry as argument to monitorexit

    __ inc(Lscratch, frame::interpreter_frame_monitor_size() * wordSize); // advance to next

    __ bind(entry);

    __ cmp(Lscratch, O2);
    __ brx(Assembler::lessEqualUnsigned, true, Assembler::pt, loop);
    __ delayed()->ld_ptr(Lscratch, BasicObjectLock::obj_offset_in_bytes(), O4);

    call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_illegal_monitor_state_exception));
    __ should_not_reach_here();

    __ bind(found);
  }
  __ unlock_object(O1);
}


//----------------------------------------------------------------------------------------------------
// Wide instructions

void TemplateTable::wide() {
  transition(vtos, vtos);
  __ ldub(Lbcp, 1, G3_scratch); // get next bc
  __ sll(G3_scratch, LogBytesPerWord, G3_scratch);
  AddressLiteral ep(Interpreter::_wentry_point);
  __ set(ep, G4_scratch);
  __ ld_ptr(G4_scratch, G3_scratch, G3_scratch);
  __ jmp(G3_scratch, G0);
  __ delayed()->nop();
  // Note: the Lbcp increment step is part of the individual wide bytecode implementations
}


//----------------------------------------------------------------------------------------------------
// Multi arrays

void TemplateTable::multianewarray() {
  transition(vtos, atos);
  // put ndims * stackElementSize into Lscratch
  __ ldub(Lbcp, 3, Lscratch);
  __ sll(Lscratch, Interpreter::logStackElementSize, Lscratch);
  // Lesp points past the last dimension, so set O1 to the first_dim address
  __ add(Lesp, Lscratch, O1);
  call_VM(Otos_i, CAST_FROM_FN_PTR(address, InterpreterRuntime::multianewarray), O1);
  __ add(Lesp, Lscratch, Lesp); // pop all dimensions off the stack
}
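// For reference, a hedged sketch of the operand-stack math in multianewarray()
// above: the SPARC interpreter's expression stack grows toward lower addresses
// and Lesp points past the top element. For ndims == 2 with dimensions d1, d2
// pushed in order:
//
//   Lesp + 2*stackElementSize -> d1    // first_dim: the address passed in O1
//   Lesp + 1*stackElementSize -> d2    // last_dim (top of stack)
//   Lesp                      -> (free slot)
//
// so O1 = Lesp + ndims*stackElementSize, and adding the same amount to Lesp
// afterwards pops all ndims dimensions at once.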