/*
 * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/interpreterRuntime.hpp"
#include "interpreter/interp_masm.hpp"
#include "interpreter/templateTable.hpp"
#include "memory/universe.inline.hpp"
#include "oops/methodData.hpp"
#include "oops/objArrayKlass.hpp"
#include "oops/oop.inline.hpp"
#include "prims/methodHandles.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/synchronizer.hpp"
#include "utilities/macros.hpp"

#define __ _masm->

// Misc helpers

// Do an oop store like *(base + index + offset) = val;
// index can be noreg.
static void do_oop_store(InterpreterMacroAssembler* _masm,
                         Register base,
                         Register index,
                         int offset,
                         Register val,
                         Register tmp,
                         BarrierSet::Name barrier,
                         bool precise) {
  assert(tmp != val && tmp != base && tmp != index, "register collision");
  assert(index == noreg || offset == 0, "only one offset");
  switch (barrier) {
#if INCLUDE_ALL_GCS
    case BarrierSet::G1SATBCTLogging:
      {
        // Load and record the previous value.
        __ g1_write_barrier_pre(base, index, offset,
                                noreg /* pre_val */,
                                tmp, true /*preserve_o_regs*/);

        // G1 barrier needs uncompressed oop for region cross check.
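        // In outline (a sketch of the barrier protocol, not extra code):
        //   pre_barrier(*(base+index+offset));          // SATB: log the value being overwritten
        //   *(base+index+offset) = val;                 // the actual oop store
        //   if (val != NULL) post_barrier(addr, val);   // dirty a card if the store crosses regions
        // The region cross check compares full addresses, which is why new_val
        // below must hold the uncompressed oop when UseCompressedOops is on.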
        Register new_val = val;
        if (UseCompressedOops && val != G0) {
          new_val = tmp;
          __ mov(val, new_val);
        }

        if (index == noreg) {
          assert(Assembler::is_simm13(offset), "fix this code");
          __ store_heap_oop(val, base, offset);
        } else {
          __ store_heap_oop(val, base, index);
        }

        // No need for post barrier if storing NULL
        if (val != G0) {
          if (precise) {
            if (index == noreg) {
              __ add(base, offset, base);
            } else {
              __ add(base, index, base);
            }
          }
          __ g1_write_barrier_post(base, new_val, tmp);
        }
      }
      break;
#endif // INCLUDE_ALL_GCS
    case BarrierSet::CardTableForRS:
    case BarrierSet::CardTableExtension:
      {
        if (index == noreg) {
          assert(Assembler::is_simm13(offset), "fix this code");
          __ store_heap_oop(val, base, offset);
        } else {
          __ store_heap_oop(val, base, index);
        }
        // No need for post barrier if storing NULL
        if (val != G0) {
          if (precise) {
            if (index == noreg) {
              __ add(base, offset, base);
            } else {
              __ add(base, index, base);
            }
          }
          __ card_write_barrier_post(base, val, tmp);
        }
      }
      break;
    case BarrierSet::ModRef:
      ShouldNotReachHere();
      break;
    default:
      ShouldNotReachHere();
  }
}


//----------------------------------------------------------------------------------------------------
// Platform-dependent initialization

void TemplateTable::pd_initialize() {
  // (none)
}


//----------------------------------------------------------------------------------------------------
// Condition conversion
Assembler::Condition ccNot(TemplateTable::Condition cc) {
  switch (cc) {
    case TemplateTable::equal        : return Assembler::notEqual;
    case TemplateTable::not_equal    : return Assembler::equal;
    case TemplateTable::less         : return Assembler::greaterEqual;
    case TemplateTable::less_equal   : return Assembler::greater;
    case TemplateTable::greater      : return Assembler::lessEqual;
    case TemplateTable::greater_equal: return Assembler::less;
  }
  ShouldNotReachHere();
  return Assembler::zero;
}

//----------------------------------------------------------------------------------------------------
// Miscellaneous helper routines


Address TemplateTable::at_bcp(int offset) {
  assert(_desc->uses_bcp(), "inconsistent uses_bcp information");
  return Address(Lbcp, offset);
}


void TemplateTable::patch_bytecode(Bytecodes::Code bc, Register bc_reg,
                                   Register temp_reg, bool load_bc_into_bc_reg/*=true*/,
                                   int byte_no) {
  // With sharing on, may need to test Method* flag.
  if (!RewriteBytecodes) return;
  Label L_patch_done;

  switch (bc) {
  case Bytecodes::_fast_aputfield:
  case Bytecodes::_fast_bputfield:
  case Bytecodes::_fast_cputfield:
  case Bytecodes::_fast_dputfield:
  case Bytecodes::_fast_fputfield:
  case Bytecodes::_fast_iputfield:
  case Bytecodes::_fast_lputfield:
  case Bytecodes::_fast_sputfield:
    {
      // We skip bytecode quickening for putfield instructions when
      // the put_code written to the constant pool cache is zero.
      // This is required so that every execution of this instruction
      // calls out to InterpreterRuntime::resolve_get_put to do
      // additional, required work.
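      // The quickening protocol, in outline (a sketch, not extra code):
      //   1. read the cached put_code for this cp-cache entry;
      //   2. if it is still zero, skip patching so the next execution
      //      re-enters InterpreterRuntime::resolve_get_put;
      //   3. otherwise overwrite the bytecode at bcp with the fast variant.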
      assert(byte_no == f1_byte || byte_no == f2_byte, "byte_no out of range");
      assert(load_bc_into_bc_reg, "we use bc_reg as temp");
      __ get_cache_and_index_and_bytecode_at_bcp(bc_reg, temp_reg, temp_reg, byte_no, 1);
      __ set(bc, bc_reg);
      __ cmp_and_br_short(temp_reg, 0, Assembler::equal, Assembler::pn, L_patch_done);  // don't patch
    }
    break;
  default:
    assert(byte_no == -1, "sanity");
    if (load_bc_into_bc_reg) {
      __ set(bc, bc_reg);
    }
  }

  if (JvmtiExport::can_post_breakpoint()) {
    Label L_fast_patch;
    __ ldub(at_bcp(0), temp_reg);
    __ cmp_and_br_short(temp_reg, Bytecodes::_breakpoint, Assembler::notEqual, Assembler::pt, L_fast_patch);
    // perform the quickening, slowly, in the bowels of the breakpoint table
    __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::set_original_bytecode_at), Lmethod, Lbcp, bc_reg);
    __ ba_short(L_patch_done);
    __ bind(L_fast_patch);
  }

#ifdef ASSERT
  Bytecodes::Code orig_bytecode = Bytecodes::java_code(bc);
  Label L_okay;
  __ ldub(at_bcp(0), temp_reg);
  __ cmp(temp_reg, orig_bytecode);
  __ br(Assembler::equal, false, Assembler::pt, L_okay);
  __ delayed()->cmp(temp_reg, bc_reg);
  __ br(Assembler::equal, false, Assembler::pt, L_okay);
  __ delayed()->nop();
  __ stop("patching the wrong bytecode");
  __ bind(L_okay);
#endif

  // patch bytecode
  __ stb(bc_reg, at_bcp(0));
  __ bind(L_patch_done);
}

//----------------------------------------------------------------------------------------------------
// Individual instructions

void TemplateTable::nop() {
  transition(vtos, vtos);
  // nothing to do
}

void TemplateTable::shouldnotreachhere() {
  transition(vtos, vtos);
  __ stop("shouldnotreachhere bytecode");
}

void TemplateTable::aconst_null() {
  transition(vtos, atos);
  __ clr(Otos_i);
}


void TemplateTable::iconst(int value) {
  transition(vtos, itos);
  __ set(value, Otos_i);
}


void TemplateTable::lconst(int value) {
  transition(vtos, ltos);
  assert(value >= 0, "check this code");
#ifdef _LP64
  __ set(value, Otos_l);
#else
  __ set(value, Otos_l2);
  __ clr( Otos_l1);
#endif
}


void TemplateTable::fconst(int value) {
  transition(vtos, ftos);
  static float zero = 0.0, one = 1.0, two = 2.0;
  float* p;
  switch( value ) {
   default: ShouldNotReachHere();
   case 0:  p = &zero;  break;
   case 1:  p = &one;   break;
   case 2:  p = &two;   break;
  }
  AddressLiteral a(p);
  __ sethi(a, G3_scratch);
  __ ldf(FloatRegisterImpl::S, G3_scratch, a.low10(), Ftos_f);
}


void TemplateTable::dconst(int value) {
  transition(vtos, dtos);
  static double zero = 0.0, one = 1.0;
  double* p;
  switch( value ) {
   default: ShouldNotReachHere();
   case 0:  p = &zero;  break;
   case 1:  p = &one;   break;
  }
  AddressLiteral a(p);
  __ sethi(a, G3_scratch);
  __ ldf(FloatRegisterImpl::D, G3_scratch, a.low10(), Ftos_d);
}


// %%%%% Should factor most snippet templates across platforms

void TemplateTable::bipush() {
  transition(vtos, itos);
  __ ldsb( at_bcp(1), Otos_i );
}

void TemplateTable::sipush() {
  transition(vtos, itos);
  __ get_2_byte_integer_at_bcp(1, G3_scratch, Otos_i, InterpreterMacroAssembler::Signed);
}

void TemplateTable::ldc(bool wide) {
  transition(vtos, vtos);
  Label call_ldc, notInt, isString, notString, notClass, exit;

  if (wide) {
    __ get_2_byte_integer_at_bcp(1, G3_scratch, O1, InterpreterMacroAssembler::Unsigned);
  } else {
    __ ldub(Lbcp, 1, O1);
  }
  __ get_cpool_and_tags(O0, O2);

  const int base_offset = ConstantPool::header_size() * wordSize;
  const int tags_offset = Array<u1>::base_offset_in_bytes();

  // get type from tags
  __ add(O2, tags_offset, O2);
  __ ldub(O2, O1, O2);

  // unresolved class? If so, must resolve
  __ cmp_and_brx_short(O2, JVM_CONSTANT_UnresolvedClass, Assembler::equal, Assembler::pt, call_ldc);

  // unresolved class in error state
  __ cmp_and_brx_short(O2, JVM_CONSTANT_UnresolvedClassInError, Assembler::equal, Assembler::pn, call_ldc);

  __ cmp(O2, JVM_CONSTANT_Class);    // need to call vm to get java mirror of the class
  __ brx(Assembler::notEqual, true, Assembler::pt, notClass);
  __ delayed()->add(O0, base_offset, O0);

  __ bind(call_ldc);
  __ set(wide, O1);
  call_VM(Otos_i, CAST_FROM_FN_PTR(address, InterpreterRuntime::ldc), O1);
  __ push(atos);
  __ ba_short(exit);

  __ bind(notClass);
  // __ add(O0, base_offset, O0);
  __ sll(O1, LogBytesPerWord, O1);
  __ cmp(O2, JVM_CONSTANT_Integer);
  __ brx(Assembler::notEqual, true, Assembler::pt, notInt);
  __ delayed()->cmp(O2, JVM_CONSTANT_String);
  __ ld(O0, O1, Otos_i);
  __ push(itos);
  __ ba_short(exit);

  __ bind(notInt);
  // __ cmp(O2, JVM_CONSTANT_String);
  __ brx(Assembler::notEqual, true, Assembler::pt, notString);
  __ delayed()->ldf(FloatRegisterImpl::S, O0, O1, Ftos_f);
  __ bind(isString);
  __ stop("string should be rewritten to fast_aldc");
  __ ba_short(exit);

  __ bind(notString);
  // __ ldf(FloatRegisterImpl::S, O0, O1, Ftos_f);
  __ push(ftos);

  __ bind(exit);
}

// Fast path for caching oop constants.
// %%% We should use this to handle Class and String constants also.
// %%% It will simplify the ldc/primitive path considerably.
void TemplateTable::fast_aldc(bool wide) {
  transition(vtos, atos);

  int index_size = wide ? sizeof(u2) : sizeof(u1);
  Label resolved;

  // We are resolved if the resolved reference cache entry contains a
  // non-null object (CallSite, etc.)
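  // In outline (a sketch of the fast path):
  //   obj = resolved_references[cache_index];
  //   if (obj != NULL) use it;                       // no VM call
  //   else obj = InterpreterRuntime::resolve_ldc();  // slow path fills the cache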
  assert_different_registers(Otos_i, G3_scratch);
  __ get_cache_index_at_bcp(Otos_i, G3_scratch, 1, index_size);  // load index => G3_scratch
  __ load_resolved_reference_at_index(Otos_i, G3_scratch);
  __ tst(Otos_i);
  __ br(Assembler::notEqual, false, Assembler::pt, resolved);
  __ delayed()->set((int)bytecode(), O1);

  address entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_ldc);

  // first time invocation - must resolve first
  __ call_VM(Otos_i, entry, O1);
  __ bind(resolved);
  __ verify_oop(Otos_i);
}

void TemplateTable::ldc2_w() {
  transition(vtos, vtos);
  Label Long, exit;

  __ get_2_byte_integer_at_bcp(1, G3_scratch, O1, InterpreterMacroAssembler::Unsigned);
  __ get_cpool_and_tags(O0, O2);

  const int base_offset = ConstantPool::header_size() * wordSize;
  const int tags_offset = Array<u1>::base_offset_in_bytes();
  // get type from tags
  __ add(O2, tags_offset, O2);
  __ ldub(O2, O1, O2);

  __ sll(O1, LogBytesPerWord, O1);
  __ add(O0, O1, G3_scratch);

  __ cmp_and_brx_short(O2, JVM_CONSTANT_Double, Assembler::notEqual, Assembler::pt, Long);
  // A double can be placed at word-aligned locations in the constant pool.
  // Check out Conversions.java for an example.
  // Also ConstantPool::header_size() is 20, which makes it very difficult
  // to double-align double on the constant pool.  SG, 11/7/97
#ifdef _LP64
  __ ldf(FloatRegisterImpl::D, G3_scratch, base_offset, Ftos_d);
#else
  FloatRegister f = Ftos_d;
  __ ldf(FloatRegisterImpl::S, G3_scratch, base_offset, f);
  __ ldf(FloatRegisterImpl::S, G3_scratch, base_offset + sizeof(jdouble)/2,
         f->successor());
#endif
  __ push(dtos);
  __ ba_short(exit);

  __ bind(Long);
#ifdef _LP64
  __ ldx(G3_scratch, base_offset, Otos_l);
#else
  __ ld(G3_scratch, base_offset, Otos_l);
  __ ld(G3_scratch, base_offset + sizeof(jlong)/2, Otos_l->successor());
#endif
  __ push(ltos);

  __ bind(exit);
}

void TemplateTable::locals_index(Register reg, int offset) {
  __ ldub( at_bcp(offset), reg );
}

void TemplateTable::locals_index_wide(Register reg) {
  // offset is 2, not 1, because Lbcp points to wide prefix code
  __ get_2_byte_integer_at_bcp(2, G4_scratch, reg, InterpreterMacroAssembler::Unsigned);
}

void TemplateTable::iload() {
  iload_internal();
}

void TemplateTable::nofast_iload() {
  iload_internal(may_not_rewrite);
}

void TemplateTable::iload_internal(RewriteControl rc) {
  transition(vtos, itos);
  // Rewrite iload,iload  pair into fast_iload2
  //         iload,caload pair into fast_icaload
  if (RewriteFrequentPairs && rc == may_rewrite) {
    Label rewrite, done;

    // get next byte
    __ ldub(at_bcp(Bytecodes::length_for(Bytecodes::_iload)), G3_scratch);

    // if _iload, wait to rewrite to iload2.  We only want to rewrite the
    // last two iloads in a pair.  Comparing against fast_iload means that
    // the next bytecode is neither an iload nor a caload, and therefore
    // an iload pair.
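    // Decision table for the byte just loaded (sketch):
    //   _iload      -> leave it; only the pair's last iload is rewritten
    //   _fast_iload -> rewrite current bytecode to _fast_iload2
    //   _caload     -> rewrite current bytecode to _fast_icaload
    //   otherwise   -> rewrite current bytecode to _fast_iload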
    __ cmp_and_br_short(G3_scratch, (int)Bytecodes::_iload, Assembler::equal, Assembler::pn, done);

    __ cmp(G3_scratch, (int)Bytecodes::_fast_iload);
    __ br(Assembler::equal, false, Assembler::pn, rewrite);
    __ delayed()->set(Bytecodes::_fast_iload2, G4_scratch);

    __ cmp(G3_scratch, (int)Bytecodes::_caload);
    __ br(Assembler::equal, false, Assembler::pn, rewrite);
    __ delayed()->set(Bytecodes::_fast_icaload, G4_scratch);

    __ set(Bytecodes::_fast_iload, G4_scratch);  // don't check again
    // rewrite
    // G4_scratch: fast bytecode
    __ bind(rewrite);
    patch_bytecode(Bytecodes::_iload, G4_scratch, G3_scratch, false);
    __ bind(done);
  }

  // Get the local value into tos
  locals_index(G3_scratch);
  __ access_local_int( G3_scratch, Otos_i );
}

void TemplateTable::fast_iload2() {
  transition(vtos, itos);
  locals_index(G3_scratch);
  __ access_local_int( G3_scratch, Otos_i );
  __ push_i();
  locals_index(G3_scratch, 3);  // get next bytecode's local index.
  __ access_local_int( G3_scratch, Otos_i );
}

void TemplateTable::fast_iload() {
  transition(vtos, itos);
  locals_index(G3_scratch);
  __ access_local_int( G3_scratch, Otos_i );
}

void TemplateTable::lload() {
  transition(vtos, ltos);
  locals_index(G3_scratch);
  __ access_local_long( G3_scratch, Otos_l );
}


void TemplateTable::fload() {
  transition(vtos, ftos);
  locals_index(G3_scratch);
  __ access_local_float( G3_scratch, Ftos_f );
}


void TemplateTable::dload() {
  transition(vtos, dtos);
  locals_index(G3_scratch);
  __ access_local_double( G3_scratch, Ftos_d );
}


void TemplateTable::aload() {
  transition(vtos, atos);
  locals_index(G3_scratch);
  __ access_local_ptr( G3_scratch, Otos_i);
}


void TemplateTable::wide_iload() {
  transition(vtos, itos);
  locals_index_wide(G3_scratch);
  __ access_local_int( G3_scratch, Otos_i );
}


void TemplateTable::wide_lload() {
  transition(vtos, ltos);
  locals_index_wide(G3_scratch);
  __ access_local_long( G3_scratch, Otos_l );
}


void TemplateTable::wide_fload() {
  transition(vtos, ftos);
  locals_index_wide(G3_scratch);
  __ access_local_float( G3_scratch, Ftos_f );
}


void TemplateTable::wide_dload() {
  transition(vtos, dtos);
  locals_index_wide(G3_scratch);
  __ access_local_double( G3_scratch, Ftos_d );
}


void TemplateTable::wide_aload() {
  transition(vtos, atos);
  locals_index_wide(G3_scratch);
  __ access_local_ptr( G3_scratch, Otos_i );
  __ verify_oop(Otos_i);
}


void TemplateTable::iaload() {
  transition(itos, itos);
  // Otos_i: index
  // tos: array
  __ index_check(O2, Otos_i, LogBytesPerInt, G3_scratch, O3);
  __ ld(O3, arrayOopDesc::base_offset_in_bytes(T_INT), Otos_i);
}


void TemplateTable::laload() {
  transition(itos, ltos);
  // Otos_i: index
  // O2: array
  __ index_check(O2, Otos_i, LogBytesPerLong, G3_scratch, O3);
  __ ld_long(O3, arrayOopDesc::base_offset_in_bytes(T_LONG), Otos_l);
}


void TemplateTable::faload() {
  transition(itos, ftos);
  // Otos_i: index
  // O2: array
  __ index_check(O2, Otos_i, LogBytesPerInt, G3_scratch, O3);
  __ ldf(FloatRegisterImpl::S, O3, arrayOopDesc::base_offset_in_bytes(T_FLOAT), Ftos_f);
}


void TemplateTable::daload() {
  transition(itos, dtos);
  // Otos_i: index
  // O2: array
  __ index_check(O2, Otos_i, LogBytesPerLong, G3_scratch, O3);
  __ ldf(FloatRegisterImpl::D, O3, arrayOopDesc::base_offset_in_bytes(T_DOUBLE), Ftos_d);
}


void TemplateTable::aaload() {
  transition(itos, atos);
  // Otos_i: index
  // tos: array
  __ index_check(O2, Otos_i, UseCompressedOops ? 2 : LogBytesPerWord, G3_scratch, O3);
  __ load_heap_oop(O3, arrayOopDesc::base_offset_in_bytes(T_OBJECT), Otos_i);
  __ verify_oop(Otos_i);
}


void TemplateTable::baload() {
  transition(itos, itos);
  // Otos_i: index
  // tos: array
  __ index_check(O2, Otos_i, 0, G3_scratch, O3);
  __ ldsb(O3, arrayOopDesc::base_offset_in_bytes(T_BYTE), Otos_i);
}


void TemplateTable::caload() {
  transition(itos, itos);
  // Otos_i: index
  // tos: array
  __ index_check(O2, Otos_i, LogBytesPerShort, G3_scratch, O3);
  __ lduh(O3, arrayOopDesc::base_offset_in_bytes(T_CHAR), Otos_i);
}

void TemplateTable::fast_icaload() {
  transition(vtos, itos);
  // Otos_i: index
  // tos: array
  locals_index(G3_scratch);
  __ access_local_int( G3_scratch, Otos_i );
  __ index_check(O2, Otos_i, LogBytesPerShort, G3_scratch, O3);
  __ lduh(O3, arrayOopDesc::base_offset_in_bytes(T_CHAR), Otos_i);
}


void TemplateTable::saload() {
  transition(itos, itos);
  // Otos_i: index
  // tos: array
  __ index_check(O2, Otos_i, LogBytesPerShort, G3_scratch, O3);
  __ ldsh(O3, arrayOopDesc::base_offset_in_bytes(T_SHORT), Otos_i);
}


void TemplateTable::iload(int n) {
  transition(vtos, itos);
  __ ld( Llocals, Interpreter::local_offset_in_bytes(n), Otos_i );
}


void TemplateTable::lload(int n) {
  transition(vtos, ltos);
  assert(n+1 < Argument::n_register_parameters, "would need more code");
  __ load_unaligned_long(Llocals, Interpreter::local_offset_in_bytes(n+1), Otos_l);
}


void TemplateTable::fload(int n) {
  transition(vtos, ftos);
  assert(n < Argument::n_register_parameters, "would need more code");
  __ ldf( FloatRegisterImpl::S, Llocals, Interpreter::local_offset_in_bytes(n), Ftos_f );
}


void TemplateTable::dload(int n) {
  transition(vtos, dtos);
  FloatRegister dst = Ftos_d;
  __ load_unaligned_double(Llocals, Interpreter::local_offset_in_bytes(n+1), dst);
}


void TemplateTable::aload(int n) {
  transition(vtos, atos);
  __ ld_ptr( Llocals, Interpreter::local_offset_in_bytes(n), Otos_i );
}

void TemplateTable::aload_0() {
  aload_0_internal();
}

void TemplateTable::nofast_aload_0() {
  aload_0_internal(may_not_rewrite);
}

void TemplateTable::aload_0_internal(RewriteControl rc) {
  transition(vtos, atos);

  // According to bytecode histograms, the pairs:
  //
  // _aload_0, _fast_igetfield (itos)
  // _aload_0, _fast_agetfield (atos)
  // _aload_0, _fast_fgetfield (ftos)
  //
  // occur frequently. If RewriteFrequentPairs is set, the (slow) _aload_0
  // bytecode checks the next bytecode and then rewrites the current
  // bytecode into a pair bytecode; otherwise it rewrites the current
  // bytecode into _fast_aload_0 that doesn't do the pair check anymore.
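  //
  // The decision table, analogous to iload_internal (sketch):
  //   _getfield       -> leave it; the rewrite happens on a later execution
  //   _fast_igetfield -> _fast_iaccess_0
  //   _fast_agetfield -> _fast_aaccess_0
  //   _fast_fgetfield -> _fast_faccess_0
  //   otherwise       -> _fast_aload_0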
  //
  if (RewriteFrequentPairs && rc == may_rewrite) {
    Label rewrite, done;

    // get next byte
    __ ldub(at_bcp(Bytecodes::length_for(Bytecodes::_aload_0)), G3_scratch);

    // do actual aload_0
    aload(0);

    // if _getfield then wait with rewrite
    __ cmp_and_br_short(G3_scratch, (int)Bytecodes::_getfield, Assembler::equal, Assembler::pn, done);

    // if _igetfield then rewrite to _fast_iaccess_0
    assert(Bytecodes::java_code(Bytecodes::_fast_iaccess_0) == Bytecodes::_aload_0, "adjust fast bytecode def");
    __ cmp(G3_scratch, (int)Bytecodes::_fast_igetfield);
    __ br(Assembler::equal, false, Assembler::pn, rewrite);
    __ delayed()->set(Bytecodes::_fast_iaccess_0, G4_scratch);

    // if _agetfield then rewrite to _fast_aaccess_0
    assert(Bytecodes::java_code(Bytecodes::_fast_aaccess_0) == Bytecodes::_aload_0, "adjust fast bytecode def");
    __ cmp(G3_scratch, (int)Bytecodes::_fast_agetfield);
    __ br(Assembler::equal, false, Assembler::pn, rewrite);
    __ delayed()->set(Bytecodes::_fast_aaccess_0, G4_scratch);

    // if _fgetfield then rewrite to _fast_faccess_0
    assert(Bytecodes::java_code(Bytecodes::_fast_faccess_0) == Bytecodes::_aload_0, "adjust fast bytecode def");
    __ cmp(G3_scratch, (int)Bytecodes::_fast_fgetfield);
    __ br(Assembler::equal, false, Assembler::pn, rewrite);
    __ delayed()->set(Bytecodes::_fast_faccess_0, G4_scratch);

    // else rewrite to _fast_aload0
    assert(Bytecodes::java_code(Bytecodes::_fast_aload_0) == Bytecodes::_aload_0, "adjust fast bytecode def");
    __ set(Bytecodes::_fast_aload_0, G4_scratch);

    // rewrite
    // G4_scratch: fast bytecode
    __ bind(rewrite);
    patch_bytecode(Bytecodes::_aload_0, G4_scratch, G3_scratch, false);
    __ bind(done);
  } else {
    aload(0);
  }
}

void TemplateTable::istore() {
  transition(itos, vtos);
  locals_index(G3_scratch);
  __ store_local_int( G3_scratch, Otos_i );
}


void TemplateTable::lstore() {
  transition(ltos, vtos);
  locals_index(G3_scratch);
  __ store_local_long( G3_scratch, Otos_l );
}


void TemplateTable::fstore() {
  transition(ftos, vtos);
  locals_index(G3_scratch);
  __ store_local_float( G3_scratch, Ftos_f );
}


void TemplateTable::dstore() {
  transition(dtos, vtos);
  locals_index(G3_scratch);
  __ store_local_double( G3_scratch, Ftos_d );
}


void TemplateTable::astore() {
  transition(vtos, vtos);
  __ load_ptr(0, Otos_i);
  __ inc(Lesp, Interpreter::stackElementSize);
  __ verify_oop_or_return_address(Otos_i, G3_scratch);
  locals_index(G3_scratch);
  __ store_local_ptr(G3_scratch, Otos_i);
}


void TemplateTable::wide_istore() {
  transition(vtos, vtos);
  __ pop_i();
  locals_index_wide(G3_scratch);
  __ store_local_int( G3_scratch, Otos_i );
}


void TemplateTable::wide_lstore() {
  transition(vtos, vtos);
  __ pop_l();
  locals_index_wide(G3_scratch);
  __ store_local_long( G3_scratch, Otos_l );
}


void TemplateTable::wide_fstore() {
  transition(vtos, vtos);
  __ pop_f();
  locals_index_wide(G3_scratch);
  __ store_local_float( G3_scratch, Ftos_f );
}


void TemplateTable::wide_dstore() {
  transition(vtos, vtos);
  __ pop_d();
  locals_index_wide(G3_scratch);
  __ store_local_double( G3_scratch, Ftos_d );
}


void TemplateTable::wide_astore() {
  transition(vtos, vtos);
  __ load_ptr(0, Otos_i);
  __ inc(Lesp, Interpreter::stackElementSize);
  __ verify_oop_or_return_address(Otos_i, G3_scratch);
  locals_index_wide(G3_scratch);
  __ store_local_ptr(G3_scratch, Otos_i);
}


void TemplateTable::iastore() {
  transition(itos, vtos);
  __ pop_i(O2); // index
  // Otos_i: val
  // O3: array
  __ index_check(O3, O2, LogBytesPerInt, G3_scratch, O2);
  __ st(Otos_i, O2, arrayOopDesc::base_offset_in_bytes(T_INT));
}


void TemplateTable::lastore() {
  transition(ltos, vtos);
  __ pop_i(O2); // index
  // Otos_l: val
  // O3: array
  __ index_check(O3, O2, LogBytesPerLong, G3_scratch, O2);
  __ st_long(Otos_l, O2, arrayOopDesc::base_offset_in_bytes(T_LONG));
}


void TemplateTable::fastore() {
  transition(ftos, vtos);
  __ pop_i(O2); // index
  // Ftos_f: val
  // O3: array
  __ index_check(O3, O2, LogBytesPerInt, G3_scratch, O2);
  __ stf(FloatRegisterImpl::S, Ftos_f, O2, arrayOopDesc::base_offset_in_bytes(T_FLOAT));
}


void TemplateTable::dastore() {
  transition(dtos, vtos);
  __ pop_i(O2); // index
  // Ftos_d: val
  // O3: array
  __ index_check(O3, O2, LogBytesPerLong, G3_scratch, O2);
  __ stf(FloatRegisterImpl::D, Ftos_d, O2, arrayOopDesc::base_offset_in_bytes(T_DOUBLE));
}


void TemplateTable::aastore() {
  Label store_ok, is_null, done;
  transition(vtos, vtos);
  __ ld_ptr(Lesp, Interpreter::expr_offset_in_bytes(0), Otos_i);
  __ ld(Lesp, Interpreter::expr_offset_in_bytes(1), O2);      // get index
  __ ld_ptr(Lesp, Interpreter::expr_offset_in_bytes(2), O3);  // get array
  // Otos_i: val
  // O2: index
  // O3: array
  __ verify_oop(Otos_i);
  __ index_check_without_pop(O3, O2, UseCompressedOops ? 2 : LogBytesPerWord, G3_scratch, O1);

  // do array store check - check for NULL value first
  __ br_null_short( Otos_i, Assembler::pn, is_null );

  __ load_klass(O3, O4);     // get array klass
  __ load_klass(Otos_i, O5); // get value klass

  // do fast instanceof cache test

  __ ld_ptr(O4, in_bytes(ObjArrayKlass::element_klass_offset()), O4);

  assert(Otos_i == O0, "just checking");

  // Otos_i:    value
  // O1:        addr - offset
  // O2:        index
  // O3:        array
  // O4:        array element klass
  // O5:        value klass

  // Address element(O1, 0, arrayOopDesc::base_offset_in_bytes(T_OBJECT));

  // Generate a fast subtype check.  Branch to store_ok if no
  // failure.  Throw if failure.
  __ gen_subtype_check( O5, O4, G3_scratch, G4_scratch, G1_scratch, store_ok );

  // Not a subtype; so must throw exception
  __ throw_if_not_x( Assembler::never, Interpreter::_throw_ArrayStoreException_entry, G3_scratch );

  // Store is OK.
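  // (Reaching here means the subtype check passed.  The store itself and the
  //  GC barriers are delegated to do_oop_store; the is_null path below does
  //  no check at all -- storing NULL is always legal, and do_oop_store omits
  //  the post barrier for a G0 value.)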
  __ bind(store_ok);
  do_oop_store(_masm, O1, noreg, arrayOopDesc::base_offset_in_bytes(T_OBJECT), Otos_i, G3_scratch, _bs->kind(), true);

  __ ba(done);
  __ delayed()->inc(Lesp, 3 * Interpreter::stackElementSize); // adj sp (pops array, index and value)

  __ bind(is_null);
  do_oop_store(_masm, O1, noreg, arrayOopDesc::base_offset_in_bytes(T_OBJECT), G0, G4_scratch, _bs->kind(), true);

  __ profile_null_seen(G3_scratch);
  __ inc(Lesp, 3 * Interpreter::stackElementSize); // adj sp (pops array, index and value)
  __ bind(done);
}


void TemplateTable::bastore() {
  transition(itos, vtos);
  __ pop_i(O2); // index
  // Otos_i: val
  // O3: array
  __ index_check(O3, O2, 0, G3_scratch, O2);
  __ stb(Otos_i, O2, arrayOopDesc::base_offset_in_bytes(T_BYTE));
}


void TemplateTable::castore() {
  transition(itos, vtos);
  __ pop_i(O2); // index
  // Otos_i: val
  // O3: array
  __ index_check(O3, O2, LogBytesPerShort, G3_scratch, O2);
  __ sth(Otos_i, O2, arrayOopDesc::base_offset_in_bytes(T_CHAR));
}


void TemplateTable::sastore() {
  // %%%%% Factor across platform
  castore();
}


void TemplateTable::istore(int n) {
  transition(itos, vtos);
  __ st(Otos_i, Llocals, Interpreter::local_offset_in_bytes(n));
}


void TemplateTable::lstore(int n) {
  transition(ltos, vtos);
  assert(n+1 < Argument::n_register_parameters, "only handle register cases");
  __ store_unaligned_long(Otos_l, Llocals, Interpreter::local_offset_in_bytes(n+1));

}


void TemplateTable::fstore(int n) {
  transition(ftos, vtos);
  assert(n < Argument::n_register_parameters, "only handle register cases");
  __ stf(FloatRegisterImpl::S, Ftos_f, Llocals, Interpreter::local_offset_in_bytes(n));
}


void TemplateTable::dstore(int n) {
  transition(dtos, vtos);
  FloatRegister src = Ftos_d;
  __ store_unaligned_double(src, Llocals, Interpreter::local_offset_in_bytes(n+1));
}


void TemplateTable::astore(int n) {
  transition(vtos, vtos);
  __ load_ptr(0, Otos_i);
  __ inc(Lesp, Interpreter::stackElementSize);
  __ verify_oop_or_return_address(Otos_i, G3_scratch);
  __ store_local_ptr(n, Otos_i);
}


void TemplateTable::pop() {
  transition(vtos, vtos);
  __ inc(Lesp, Interpreter::stackElementSize);
}


void TemplateTable::pop2() {
  transition(vtos, vtos);
  __ inc(Lesp, 2 * Interpreter::stackElementSize);
}


void TemplateTable::dup() {
  transition(vtos, vtos);
  // stack: ..., a
  // load a and tag
  __ load_ptr(0, Otos_i);
  __ push_ptr(Otos_i);
  // stack: ..., a, a
}


void TemplateTable::dup_x1() {
  transition(vtos, vtos);
  // stack: ..., a, b
  __ load_ptr( 1, G3_scratch);  // get a
  __ load_ptr( 0, Otos_l1);     // get b
  __ store_ptr(1, Otos_l1);     // put b
  __ store_ptr(0, G3_scratch);  // put a - like swap
  __ push_ptr(Otos_l1);         // push b
  // stack: ..., b, a, b
}


void TemplateTable::dup_x2() {
  transition(vtos, vtos);
  // stack: ..., a, b, c
  // get c and push on stack, reuse registers
  __ load_ptr( 0, G3_scratch);  // get c
  __ push_ptr(G3_scratch);      // push c with tag
  // stack: ..., a, b, c, c  (c in reg)  (Lesp - 4)
  // (stack offsets n+1 now)
  __ load_ptr( 3, Otos_l1);     // get a
  __ store_ptr(3, G3_scratch);  // put c at 3
  // stack: ..., c, b, c, c  (a in reg)
  __ load_ptr( 2, G3_scratch);  // get b
  __ store_ptr(2, Otos_l1);     // put a at 2
  // stack: ..., c, a, c, c  (b in reg)
  __ store_ptr(1, G3_scratch);  // put b at 1
  // stack: ..., c, a, b, c
}


void TemplateTable::dup2() {
  transition(vtos, vtos);
  __ load_ptr(1, G3_scratch);  // get a
  __ load_ptr(0, Otos_l1);     // get b
  __ push_ptr(G3_scratch);     // push a
  __ push_ptr(Otos_l1);        // push b
  // stack: ..., a, b, a, b
}


void TemplateTable::dup2_x1() {
  transition(vtos, vtos);
  // stack: ..., a, b, c
  __ load_ptr( 1, Lscratch);    // get b
  __ load_ptr( 2, Otos_l1);     // get a
  __ store_ptr(2, Lscratch);    // put b at a
  // stack: ..., b, b, c
  __ load_ptr( 0, G3_scratch);  // get c
  __ store_ptr(1, G3_scratch);  // put c at b
  // stack: ..., b, c, c
  __ store_ptr(0, Otos_l1);     // put a at c
  // stack: ..., b, c, a
  __ push_ptr(Lscratch);        // push b
  __ push_ptr(G3_scratch);      // push c
  // stack: ..., b, c, a, b, c
}


// The spec says that these types can be a mixture of category 1 (1 word)
// types and/or category 2 types (longs and doubles)
void TemplateTable::dup2_x2() {
  transition(vtos, vtos);
  // stack: ..., a, b, c, d
  __ load_ptr( 1, Lscratch);    // get c
  __ load_ptr( 3, Otos_l1);     // get a
  __ store_ptr(3, Lscratch);    // put c at 3
  __ store_ptr(1, Otos_l1);     // put a at 1
  // stack: ..., c, b, a, d
  __ load_ptr( 2, G3_scratch);  // get b
  __ load_ptr( 0, Otos_l1);     // get d
  __ store_ptr(0, G3_scratch);  // put b at 0
  __ store_ptr(2, Otos_l1);     // put d at 2
  // stack: ..., c, d, a, b
  __ push_ptr(Lscratch);        // push c
  __ push_ptr(Otos_l1);         // push d
  // stack: ..., c, d, a, b, c, d
}


void TemplateTable::swap() {
  transition(vtos, vtos);
  // stack: ..., a, b
  __ load_ptr( 1, G3_scratch);  // get a
  __ load_ptr( 0, Otos_l1);     // get b
  __ store_ptr(0, G3_scratch);  // put b
  __ store_ptr(1, Otos_l1);     // put a
  // stack: ..., b, a
}


void TemplateTable::iop2(Operation op) {
  transition(itos, itos);
  __ pop_i(O1);
  switch (op) {
   case  add:  __ add(O1, Otos_i, Otos_i);  break;
   case  sub:  __ sub(O1, Otos_i, Otos_i);  break;
     // %%%%% Mul may not exist: better to call .mul?
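     // (smul should be safe here: imul is defined to return the low 32 bits
     //  of the product, which V8's smul produces; the .mul software routine
     //  was only needed on pre-V8 parts.)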
   case  mul:  __ smul(O1, Otos_i, Otos_i);  break;
   case _and:  __ and3(O1, Otos_i, Otos_i);  break;
   case  _or:  __ or3(O1, Otos_i, Otos_i);   break;
   case _xor:  __ xor3(O1, Otos_i, Otos_i);  break;
   case  shl:  __ sll(O1, Otos_i, Otos_i);   break;
   case  shr:  __ sra(O1, Otos_i, Otos_i);   break;
   case ushr:  __ srl(O1, Otos_i, Otos_i);   break;
   default: ShouldNotReachHere();
  }
}


void TemplateTable::lop2(Operation op) {
  transition(ltos, ltos);
  __ pop_l(O2);
  switch (op) {
#ifdef _LP64
   case  add:  __ add(O2, Otos_l, Otos_l);   break;
   case  sub:  __ sub(O2, Otos_l, Otos_l);   break;
   case _and:  __ and3(O2, Otos_l, Otos_l);  break;
   case  _or:  __ or3(O2, Otos_l, Otos_l);   break;
   case _xor:  __ xor3(O2, Otos_l, Otos_l);  break;
#else
   case  add:  __ addcc(O3, Otos_l2, Otos_l2);  __ addc(O2, Otos_l1, Otos_l1);  break;
   case  sub:  __ subcc(O3, Otos_l2, Otos_l2);  __ subc(O2, Otos_l1, Otos_l1);  break;
   case _and:  __ and3(O3, Otos_l2, Otos_l2);   __ and3(O2, Otos_l1, Otos_l1);  break;
   case  _or:  __ or3(O3, Otos_l2, Otos_l2);    __ or3(O2, Otos_l1, Otos_l1);   break;
   case _xor:  __ xor3(O3, Otos_l2, Otos_l2);   __ xor3(O2, Otos_l1, Otos_l1);  break;
#endif
   default: ShouldNotReachHere();
  }
}


void TemplateTable::idiv() {
  // %%%%% Later: ForSPARC/V7 call .sdiv library routine,
  // %%%%% Use ldsw...sdivx on pure V9 ABI. 64 bit safe.

  transition(itos, itos);
  __ pop_i(O1); // get 1st op

  // Y contains upper 32 bits of result, set it to 0 or all ones
  __ wry(G0);
  __ mov(~0, G3_scratch);

  __ tst(O1);
  Label neg;
  __ br(Assembler::negative, true, Assembler::pn, neg);
  __ delayed()->wry(G3_scratch);
  __ bind(neg);

  Label ok;
  __ tst(Otos_i);
  __ throw_if_not_icc( Assembler::notZero, Interpreter::_throw_ArithmeticException_entry, G3_scratch );

  const int min_int = 0x80000000;
  Label regular;
  __ cmp(Otos_i, -1);
  __ br(Assembler::notEqual, false, Assembler::pt, regular);
#ifdef _LP64
  // Don't put set in delay slot
  // Set will turn into multiple instructions in 64 bit mode
  __ delayed()->nop();
  __ set(min_int, G4_scratch);
#else
  __ delayed()->set(min_int, G4_scratch);
#endif
  Label done;
  __ cmp(O1, G4_scratch);
  __ br(Assembler::equal, true, Assembler::pt, done);
  __ delayed()->mov(O1, Otos_i);  // (mov only executed if branch taken)

  __ bind(regular);
  __ sdiv(O1, Otos_i, Otos_i); // note: irem uses O1 after this instruction!
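  // (The min_int / -1 pair was special-cased above: the Java result is
  //  min_int, which an overflowing sdiv cannot be trusted to produce.)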
  __ bind(done);
}


void TemplateTable::irem() {
  transition(itos, itos);
  __ mov(Otos_i, O2);  // save divisor
  idiv();              // %%%% Hack: exploits fact that idiv leaves dividend in O1
  __ smul(Otos_i, O2, Otos_i);
  __ sub(O1, Otos_i, Otos_i);
}


void TemplateTable::lmul() {
  transition(ltos, ltos);
  __ pop_l(O2);
#ifdef _LP64
  __ mulx(Otos_l, O2, Otos_l);
#else
  __ call_VM_leaf(Lscratch, CAST_FROM_FN_PTR(address, SharedRuntime::lmul));
#endif

}


void TemplateTable::ldiv() {
  transition(ltos, ltos);

  // check for zero
  __ pop_l(O2);
#ifdef _LP64
  __ tst(Otos_l);
  __ throw_if_not_xcc( Assembler::notZero, Interpreter::_throw_ArithmeticException_entry, G3_scratch);
  __ sdivx(O2, Otos_l, Otos_l);
#else
  __ orcc(Otos_l1, Otos_l2, G0);
  __ throw_if_not_icc( Assembler::notZero, Interpreter::_throw_ArithmeticException_entry, G3_scratch);
  __ call_VM_leaf(Lscratch, CAST_FROM_FN_PTR(address, SharedRuntime::ldiv));
#endif
}


void TemplateTable::lrem() {
  transition(ltos, ltos);

  // check for zero
  __ pop_l(O2);
#ifdef _LP64
  __ tst(Otos_l);
  __ throw_if_not_xcc( Assembler::notZero, Interpreter::_throw_ArithmeticException_entry, G3_scratch);
  __ sdivx(O2, Otos_l, Otos_l2);
  __ mulx (Otos_l2, Otos_l, Otos_l2);
  __ sub  (O2, Otos_l2, Otos_l);
#else
  __ orcc(Otos_l1, Otos_l2, G0);
  __ throw_if_not_icc(Assembler::notZero, Interpreter::_throw_ArithmeticException_entry, G3_scratch);
  __ call_VM_leaf(Lscratch, CAST_FROM_FN_PTR(address, SharedRuntime::lrem));
#endif
}


void TemplateTable::lshl() {
  transition(itos, ltos); // %%%% could optimize, fill delay slot or opt for ultra

  __ pop_l(O2); // shift value in O2, O3
#ifdef _LP64
  __ sllx(O2, Otos_i, Otos_l);
#else
  __ lshl(O2, O3, Otos_i, Otos_l1, Otos_l2, O4);
#endif
}


void TemplateTable::lshr() {
  transition(itos, ltos); // %%%% see lshl comment

  __ pop_l(O2); // shift value in O2, O3
#ifdef _LP64
  __ srax(O2, Otos_i, Otos_l);
#else
  __ lshr(O2, O3, Otos_i, Otos_l1, Otos_l2, O4);
#endif
}



void TemplateTable::lushr() {
  transition(itos, ltos); // %%%% see lshl comment

  __ pop_l(O2); // shift value in O2, O3
#ifdef _LP64
  __ srlx(O2, Otos_i, Otos_l);
#else
  __ lushr(O2, O3, Otos_i, Otos_l1, Otos_l2, O4);
#endif
}


void TemplateTable::fop2(Operation op) {
  transition(ftos, ftos);
  switch (op) {
   case  add:  __ pop_f(F4); __ fadd(FloatRegisterImpl::S, F4, Ftos_f, Ftos_f);  break;
   case  sub:  __ pop_f(F4); __ fsub(FloatRegisterImpl::S, F4, Ftos_f, Ftos_f);  break;
   case  mul:  __ pop_f(F4); __ fmul(FloatRegisterImpl::S, F4, Ftos_f, Ftos_f);  break;
   case  div:  __ pop_f(F4); __ fdiv(FloatRegisterImpl::S, F4, Ftos_f, Ftos_f);  break;
   case  rem:
     assert(Ftos_f == F0, "just checking");
#ifdef _LP64
     // LP64 calling conventions use F1, F3 for passing 2 floats
     __ pop_f(F1);
     __ fmov(FloatRegisterImpl::S, Ftos_f, F3);
#else
     __ pop_i(O0);
     __ stf(FloatRegisterImpl::S, Ftos_f, __ d_tmp);
     __ ld( __ d_tmp, O1 );
#endif
     __ call_VM_leaf(Lscratch, CAST_FROM_FN_PTR(address, SharedRuntime::frem));
     assert( Ftos_f == F0, "fix this code" );
     break;

   default: ShouldNotReachHere();
  }
}


void TemplateTable::dop2(Operation op) {
  transition(dtos, dtos);
  switch (op) {
   case  add:  __ pop_d(F4); __ fadd(FloatRegisterImpl::D, F4, Ftos_d, Ftos_d);  break;
   case  sub:  __ pop_d(F4); __ fsub(FloatRegisterImpl::D, F4, Ftos_d, Ftos_d);  break;
   case  mul:  __ pop_d(F4); __ fmul(FloatRegisterImpl::D, F4, Ftos_d, Ftos_d);  break;
   case  div:  __ pop_d(F4); __ fdiv(FloatRegisterImpl::D, F4, Ftos_d, Ftos_d);  break;
   case  rem:
#ifdef _LP64
     // Pass arguments in D0, D2
     __ fmov(FloatRegisterImpl::D, Ftos_f, F2);
     __ pop_d(F0);
#else
     // Pass arguments in O0O1, O2O3
     __ stf(FloatRegisterImpl::D, Ftos_f, __ d_tmp);
     __ ldd( __ d_tmp, O2 );
     __ pop_d(Ftos_f);
     __ stf(FloatRegisterImpl::D, Ftos_f, __ d_tmp);
     __ ldd( __ d_tmp, O0 );
#endif
     __ call_VM_leaf(Lscratch, CAST_FROM_FN_PTR(address, SharedRuntime::drem));
     assert( Ftos_d == F0, "fix this code" );
     break;

   default: ShouldNotReachHere();
  }
}


void TemplateTable::ineg() {
  transition(itos, itos);
  __ neg(Otos_i);
}


void TemplateTable::lneg() {
  transition(ltos, ltos);
#ifdef _LP64
  __ sub(G0, Otos_l, Otos_l);
#else
  __ lneg(Otos_l1, Otos_l2);
#endif
}


void TemplateTable::fneg() {
  transition(ftos, ftos);
  __ fneg(FloatRegisterImpl::S, Ftos_f, Ftos_f);
}


void TemplateTable::dneg() {
  transition(dtos, dtos);
  __ fneg(FloatRegisterImpl::D, Ftos_f, Ftos_f);
}


void TemplateTable::iinc() {
  transition(vtos, vtos);
  locals_index(G3_scratch);
  __ ldsb(Lbcp, 2, O2);  // load constant
  __ access_local_int(G3_scratch, Otos_i);
  __ add(Otos_i, O2, Otos_i);
  __ st(Otos_i, G3_scratch, 0);  // access_local_int puts E.A. in G3_scratch
}


void TemplateTable::wide_iinc() {
  transition(vtos, vtos);
  locals_index_wide(G3_scratch);
  __ get_2_byte_integer_at_bcp( 4, O2, O3, InterpreterMacroAssembler::Signed);
  __ access_local_int(G3_scratch, Otos_i);
  __ add(Otos_i, O3, Otos_i);
  __ st(Otos_i, G3_scratch, 0);  // access_local_int puts E.A. in G3_scratch
}


void TemplateTable::convert() {
// %%%%% Factor this first part across platforms
#ifdef ASSERT
  TosState tos_in  = ilgl;
  TosState tos_out = ilgl;
  switch (bytecode()) {
    case Bytecodes::_i2l: // fall through
    case Bytecodes::_i2f: // fall through
    case Bytecodes::_i2d: // fall through
    case Bytecodes::_i2b: // fall through
    case Bytecodes::_i2c: // fall through
    case Bytecodes::_i2s: tos_in = itos; break;
    case Bytecodes::_l2i: // fall through
    case Bytecodes::_l2f: // fall through
    case Bytecodes::_l2d: tos_in = ltos; break;
    case Bytecodes::_f2i: // fall through
    case Bytecodes::_f2l: // fall through
    case Bytecodes::_f2d: tos_in = ftos; break;
    case Bytecodes::_d2i: // fall through
    case Bytecodes::_d2l: // fall through
    case Bytecodes::_d2f: tos_in = dtos; break;
    default             : ShouldNotReachHere();
  }
  switch (bytecode()) {
    case Bytecodes::_l2i: // fall through
    case Bytecodes::_f2i: // fall through
    case Bytecodes::_d2i: // fall through
    case Bytecodes::_i2b: // fall through
    case Bytecodes::_i2c: // fall through
    case Bytecodes::_i2s: tos_out = itos; break;
    case Bytecodes::_i2l: // fall through
    case Bytecodes::_f2l: // fall through
    case Bytecodes::_d2l: tos_out = ltos; break;
    case Bytecodes::_i2f: // fall through
    case Bytecodes::_l2f: // fall through
    case Bytecodes::_d2f: tos_out = ftos; break;
    case Bytecodes::_i2d: // fall through
    case Bytecodes::_l2d: // fall through
    case Bytecodes::_f2d: tos_out = dtos; break;
    default             : ShouldNotReachHere();
  }
  transition(tos_in, tos_out);
#endif


  // Conversion
  Label done;
  switch (bytecode()) {
   case Bytecodes::_i2l:
#ifdef _LP64
    // Sign extend the 32 bits
    __ sra ( Otos_i, 0, Otos_l );
#else
    __ addcc(Otos_i, 0, Otos_l2);
    __ br(Assembler::greaterEqual, true, Assembler::pt, done);
    __ delayed()->clr(Otos_l1);
    __ set(~0, Otos_l1);
#endif
    break;

   case Bytecodes::_i2f:
    __ st(Otos_i, __ d_tmp );
    __ ldf(FloatRegisterImpl::S, __ d_tmp, F0);
    __ fitof(FloatRegisterImpl::S, F0, Ftos_f);
    break;

   case Bytecodes::_i2d:
    __ st(Otos_i, __ d_tmp);
    __ ldf(FloatRegisterImpl::S, __ d_tmp, F0);
    __ fitof(FloatRegisterImpl::D, F0, Ftos_f);
    break;

   case Bytecodes::_i2b:
    __ sll(Otos_i, 24, Otos_i);
    __ sra(Otos_i, 24, Otos_i);
    break;

   case Bytecodes::_i2c:
    __ sll(Otos_i, 16, Otos_i);
    __ srl(Otos_i, 16, Otos_i);
    break;

   case Bytecodes::_i2s:
    __ sll(Otos_i, 16, Otos_i);
    __ sra(Otos_i, 16, Otos_i);
    break;

   case Bytecodes::_l2i:
#ifndef _LP64
    __ mov(Otos_l2, Otos_i);
#else
    // Sign-extend into the high 32 bits
    __ sra(Otos_l, 0, Otos_i);
#endif
    break;

   case Bytecodes::_l2f:
   case Bytecodes::_l2d:
    __ st_long(Otos_l, __ d_tmp);
    __ ldf(FloatRegisterImpl::D, __ d_tmp, Ftos_d);

    if (bytecode() == Bytecodes::_l2f) {
      __ fxtof(FloatRegisterImpl::S, Ftos_d, Ftos_f);
    } else {
      __ fxtof(FloatRegisterImpl::D, Ftos_d, Ftos_d);
    }
    break;

   case Bytecodes::_f2i: {
      Label isNaN;
      // result must be 0 if value is NaN; test by comparing value to itself
      __ fcmp(FloatRegisterImpl::S, Assembler::fcc0, Ftos_f, Ftos_f);
      __ fb(Assembler::f_unordered, true, Assembler::pn, isNaN);
      __ delayed()->clr(Otos_i);  // NaN
      __ ftoi(FloatRegisterImpl::S, Ftos_f, F30);
      __ stf(FloatRegisterImpl::S, F30, __ d_tmp);
      __ ld(__ d_tmp, Otos_i);
      __ bind(isNaN);
    }
    break;

   case Bytecodes::_f2l:
    // must uncache tos
    __ push_f();
#ifdef _LP64
    __ pop_f(F1);
#else
    __ pop_i(O0);
#endif
    __ call_VM_leaf(Lscratch, CAST_FROM_FN_PTR(address, SharedRuntime::f2l));
    break;

   case Bytecodes::_f2d:
    __ ftof( FloatRegisterImpl::S, FloatRegisterImpl::D, Ftos_f, Ftos_f);
    break;

   case Bytecodes::_d2i:
   case Bytecodes::_d2l:
    // must uncache tos
    __ push_d();
#ifdef _LP64
    // LP64 calling conventions pass first double arg in D0
    __ pop_d( Ftos_d );
#else
    __ pop_i( O0 );
    __ pop_i( O1 );
#endif
    __ call_VM_leaf(Lscratch,
                    bytecode() == Bytecodes::_d2i
                      ? CAST_FROM_FN_PTR(address, SharedRuntime::d2i)
                      : CAST_FROM_FN_PTR(address, SharedRuntime::d2l));
    break;

   case Bytecodes::_d2f:
    __ ftof( FloatRegisterImpl::D, FloatRegisterImpl::S, Ftos_d, Ftos_f);
    break;

   default: ShouldNotReachHere();
  }
  __ bind(done);
}


void TemplateTable::lcmp() {
  transition(ltos, itos);

#ifdef _LP64
  __ pop_l(O1); // pop off value 1, value 2 is in O0
  __ lcmp( O1, Otos_l, Otos_i );
#else
  __ pop_l(O2); // cmp O2,3 to O0,1
  __ lcmp( O2, O3, Otos_l1, Otos_l2, Otos_i );
#endif
}


void TemplateTable::float_cmp(bool is_float, int unordered_result) {

  if (is_float) __ pop_f(F2);
  else          __ pop_d(F2);

  assert(Ftos_f == F0 && Ftos_d == F0, "alias checking:");

  __ float_cmp( is_float, unordered_result, F2, F0, Otos_i );
}

void TemplateTable::branch(bool is_jsr, bool is_wide) {
  // Note: on SPARC, we use InterpreterMacroAssembler::if_cmp also.
  __ verify_thread();

  const Register O2_bumped_count = O2;
  __ profile_taken_branch(G3_scratch, O2_bumped_count);

  // get (wide) offset to O1_disp
  const Register O1_disp = O1;
  if (is_wide)  __ get_4_byte_integer_at_bcp( 1, G4_scratch, O1_disp, InterpreterMacroAssembler::set_CC);
  else          __ get_2_byte_integer_at_bcp( 1, G4_scratch, O1_disp, InterpreterMacroAssembler::Signed, InterpreterMacroAssembler::set_CC);

  // Handle all the JSR stuff here, then exit.
  // It's much shorter and cleaner than intermingling with the
  // non-JSR normal-branch stuff occurring below.
  if( is_jsr ) {
    // compute return address as bci in Otos_i
    __ ld_ptr(Lmethod, Method::const_offset(), G3_scratch);
    __ sub(Lbcp, G3_scratch, G3_scratch);
    __ sub(G3_scratch, in_bytes(ConstMethod::codes_offset()) - (is_wide ? 5 : 3), Otos_i);

    // Bump Lbcp to target of JSR
    __ add(Lbcp, O1_disp, Lbcp);
    // Push returnAddress for "ret" on stack
    __ push_ptr(Otos_i);
    // And away we go!
    __ dispatch_next(vtos);
    return;
  }

  // Normal (non-jsr) branch handling

  // Save the current Lbcp
  const Register l_cur_bcp = Lscratch;
  __ mov( Lbcp, l_cur_bcp );

  bool increment_invocation_counter_for_backward_branches = UseCompiler && UseLoopCounter;
  if (increment_invocation_counter_for_backward_branches) {
    Label Lforward;
    // check branch direction
    __ br( Assembler::positive, false, Assembler::pn, Lforward );
    // Bump bytecode pointer by displacement (take the branch)
    __ delayed()->add( O1_disp, Lbcp, Lbcp );  // add to bc addr

    const Register G3_method_counters = G3_scratch;
    __ get_method_counters(Lmethod, G3_method_counters, Lforward);

    if (TieredCompilation) {
      Label Lno_mdo, Loverflow;
      int increment = InvocationCounter::count_increment;
      if (ProfileInterpreter) {
        // If no method data exists, go to profile_continue.
        __ ld_ptr(Lmethod, Method::method_data_offset(), G4_scratch);
        __ br_null_short(G4_scratch, Assembler::pn, Lno_mdo);

        // Increment backedge counter in the MDO
        Address mdo_backedge_counter(G4_scratch, in_bytes(MethodData::backedge_counter_offset()) +
                                                 in_bytes(InvocationCounter::counter_offset()));
        Address mask(G4_scratch, in_bytes(MethodData::backedge_mask_offset()));
        __ increment_mask_and_jump(mdo_backedge_counter, increment, mask, G3_scratch, O0,
                                   Assembler::notZero, &Lforward);
        __ ba_short(Loverflow);
      }

      // If there's no MDO, increment counter in MethodCounters*
      __ bind(Lno_mdo);
      Address backedge_counter(G3_method_counters,
              in_bytes(MethodCounters::backedge_counter_offset()) +
              in_bytes(InvocationCounter::counter_offset()));
      Address mask(G3_method_counters, in_bytes(MethodCounters::backedge_mask_offset()));
      __ increment_mask_and_jump(backedge_counter, increment, mask, G4_scratch, O0,
                                 Assembler::notZero, &Lforward);
      __ bind(Loverflow);

      // notify point for loop, pass branch bytecode
      __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::frequency_counter_overflow), l_cur_bcp);

      // Was an OSR adapter generated?
      // O0 = osr nmethod
      __ br_null_short(O0, Assembler::pn, Lforward);

      // Has the nmethod been invalidated already?
      __ ldub(O0, nmethod::state_offset(), O2);
      __ cmp_and_br_short(O2, nmethod::in_use, Assembler::notEqual, Assembler::pn, Lforward);

      // migrate the interpreter frame off of the stack

      __ mov(G2_thread, L7);
      // save nmethod
      __ mov(O0, L6);
      __ set_last_Java_frame(SP, noreg);
      __ call_VM_leaf(noreg, CAST_FROM_FN_PTR(address, SharedRuntime::OSR_migration_begin), L7);
      __ reset_last_Java_frame();
      __ mov(L7, G2_thread);

      // move OSR nmethod to I1
      __ mov(L6, I1);

      // OSR buffer to I0
      __ mov(O0, I0);

      // remove the interpreter frame
      __ restore(I5_savedSP, 0, SP);

      // Jump to the osr code.
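      // (The nmethod was moved to I1 before the restore; after the restore
      //  shifts the register window, that same value is visible here as O1.)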
      __ ld_ptr(O1, nmethod::osr_entry_point_offset(), O2);
      __ jmp(O2, G0);
      __ delayed()->nop();

    } else { // not TieredCompilation
      // Update Backedge branch separately from invocations
      const Register G4_invoke_ctr = G4;
      __ increment_backedge_counter(G3_method_counters, G4_invoke_ctr, G1_scratch);
      if (ProfileInterpreter) {
        __ test_invocation_counter_for_mdp(G4_invoke_ctr, G3_method_counters, G1_scratch, Lforward);
        if (UseOnStackReplacement) {

          __ test_backedge_count_for_osr(O2_bumped_count, G3_method_counters, l_cur_bcp, G1_scratch);
        }
      } else {
        if (UseOnStackReplacement) {
          __ test_backedge_count_for_osr(G4_invoke_ctr, G3_method_counters, l_cur_bcp, G1_scratch);
        }
      }
    }

    __ bind(Lforward);
  } else
    // Bump bytecode pointer by displacement (take the branch)
    __ add( O1_disp, Lbcp, Lbcp );  // add to bc addr

  // continue with bytecode @ target
  // %%%%% Like Intel, could speed things up by moving bytecode fetch to code above,
  // %%%%% and changing dispatch_next to dispatch_only
  __ dispatch_next(vtos);
}


// Note Condition in argument is TemplateTable::Condition
// arg scope is within class scope

void TemplateTable::if_0cmp(Condition cc) {
  // no pointers, integer only!
  transition(itos, vtos);
  // assume branch is more often taken than not (loops use backward branches)
  __ cmp( Otos_i, 0);
  __ if_cmp(ccNot(cc), false);
}


void TemplateTable::if_icmp(Condition cc) {
  transition(itos, vtos);
  __ pop_i(O1);
  __ cmp(O1, Otos_i);
  __ if_cmp(ccNot(cc), false);
}


void TemplateTable::if_nullcmp(Condition cc) {
  transition(atos, vtos);
  __ tst(Otos_i);
  __ if_cmp(ccNot(cc), true);
}


void TemplateTable::if_acmp(Condition cc) {
  transition(atos, vtos);
  __ pop_ptr(O1);
  __ verify_oop(O1);
  __ verify_oop(Otos_i);
  __ cmp(O1, Otos_i);
  __ if_cmp(ccNot(cc), true);
}



void TemplateTable::ret() {
  transition(vtos, vtos);
  locals_index(G3_scratch);
  __ access_local_returnAddress(G3_scratch, Otos_i);
  // Otos_i contains the bci, compute the bcp from that

#ifdef _LP64
#ifdef ASSERT
  // jsr result was labeled as an 'itos' not an 'atos' because we cannot GC
  // the result.  The return address (really a BCI) was stored with an
  // 'astore' because JVM specs claim it's a pointer-sized thing.  Hence in
  // the 64-bit build the 32-bit BCI is actually in the low bits of a 64-bit
  // loaded value.
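  // (The check below verifies just that: the BCI must be a small unsigned
  //  value, not something occupying the upper half of the 64-bit register.)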
  { Label zzz ;
     __ set (65536, G3_scratch) ;
     __ cmp (Otos_i, G3_scratch) ;
     __ bp( Assembler::lessEqualUnsigned, false, Assembler::xcc, Assembler::pn, zzz);
     __ delayed()->nop();
     __ stop("BCI is in the wrong register half?");
     __ bind (zzz) ;
  }
#endif
#endif

  __ profile_ret(vtos, Otos_i, G4_scratch);

  __ ld_ptr(Lmethod, Method::const_offset(), G3_scratch);
  __ add(G3_scratch, Otos_i, G3_scratch);
  __ add(G3_scratch, in_bytes(ConstMethod::codes_offset()), Lbcp);
  __ dispatch_next(vtos);
}


void TemplateTable::wide_ret() {
  transition(vtos, vtos);
  locals_index_wide(G3_scratch);
  __ access_local_returnAddress(G3_scratch, Otos_i);
  // Otos_i contains the bci, compute the bcp from that

  __ profile_ret(vtos, Otos_i, G4_scratch);

  __ ld_ptr(Lmethod, Method::const_offset(), G3_scratch);
  __ add(G3_scratch, Otos_i, G3_scratch);
  __ add(G3_scratch, in_bytes(ConstMethod::codes_offset()), Lbcp);
  __ dispatch_next(vtos);
}


void TemplateTable::tableswitch() {
  transition(itos, vtos);
  Label default_case, continue_execution;

  // align bcp
  __ add(Lbcp, BytesPerInt, O1);
  __ and3(O1, -BytesPerInt, O1);
  // load lo, hi
  __ ld(O1, 1 * BytesPerInt, O2);  // low bound
  __ ld(O1, 2 * BytesPerInt, O3);  // high bound
#ifdef _LP64
  // Sign extend the 32 bits
  __ sra ( Otos_i, 0, Otos_i );
#endif /* _LP64 */

  // check against lo & hi
  __ cmp( Otos_i, O2);
  __ br( Assembler::less, false, Assembler::pn, default_case);
  __ delayed()->cmp( Otos_i, O3 );
  __ br( Assembler::greater, false, Assembler::pn, default_case);
  // lookup dispatch offset
  __ delayed()->sub(Otos_i, O2, O2);
  __ profile_switch_case(O2, O3, G3_scratch, G4_scratch);
  __ sll(O2, LogBytesPerInt, O2);
  __ add(O2, 3 * BytesPerInt, O2);
  __ ba(continue_execution);
  __ delayed()->ld(O1, O2, O2);
  // handle default
  __ bind(default_case);
  __ profile_switch_default(O3);
  __ ld(O1, 0, O2); // get default offset
  // continue execution
  __ bind(continue_execution);
  __ add(Lbcp, O2, Lbcp);
  __ dispatch_next(vtos);
}


void TemplateTable::lookupswitch() {
  transition(itos, itos);
  __ stop("lookupswitch bytecode should have been rewritten");
}

void TemplateTable::fast_linearswitch() {
  transition(itos, vtos);
  Label loop_entry, loop, found, continue_execution;
  // align bcp
  __ add(Lbcp, BytesPerInt, O1);
  __ and3(O1, -BytesPerInt, O1);
  // set counter
  __ ld(O1, BytesPerInt, O2);
  __ sll(O2, LogBytesPerInt + 1, O2); // in word-pairs
  __ add(O1, 2 * BytesPerInt, O3);    // set first pair addr
  __ ba(loop_entry);
  __ delayed()->add(O3, O2, O2);      // counter now points past last pair

  // table search
  __ bind(loop);
  __ cmp(O4, Otos_i);
  __ br(Assembler::equal, true, Assembler::pn, found);
  __ delayed()->ld(O3, BytesPerInt, O4); // offset -> O4
  __ inc(O3, 2 * BytesPerInt);

  __ bind(loop_entry);
  __ cmp(O2, O3);
  __ brx(Assembler::greaterUnsigned, true, Assembler::pt, loop);
  __ delayed()->ld(O3, 0, O4);

  // default case
  __ ld(O1, 0, O4); // get default offset
  if (ProfileInterpreter) {
    __ profile_switch_default(O3);
    __ ba_short(continue_execution);
  }

  // entry found -> get offset
  __ bind(found);
1867   if (ProfileInterpreter) {
1868     __ sub(O3, O1, O3);
1869     __ sub(O3, 2*BytesPerInt, O3);
1870     __ srl(O3, LogBytesPerInt + 1, O3); // in word-pairs
1871     __ profile_switch_case(O3, O1, O2, G3_scratch);
1872
1873     __ bind(continue_execution);
1874   }
1875   __ add(Lbcp, O4, Lbcp);
1876   __ dispatch_next(vtos);
1877 }
1878
1879
1880 void TemplateTable::fast_binaryswitch() {
1881   transition(itos, vtos);
1882   // Implementation using the following core algorithm: (copied from Intel)
1883   //
1884   // int binary_search(int key, LookupswitchPair* array, int n) {
1885   //   // Binary search according to "Methodik des Programmierens" by
1886   //   // Edsger W. Dijkstra and W.H.J. Feijen, Addison Wesley Germany 1985.
1887   //   int i = 0;
1888   //   int j = n;
1889   //   while (i+1 < j) {
1890   //     // invariant P: 0 <= i < j <= n and (a[i] <= key < a[j] or Q)
1891   //     // with Q: for all i: 0 <= i < n: key < a[i]
1892   //     // where a stands for the array and assuming that the (nonexistent)
1893   //     // element a[n] is infinitely big.
1894   //     int h = (i + j) >> 1;
1895   //     // i < h < j
1896   //     if (key < array[h].fast_match()) {
1897   //       j = h;
1898   //     } else {
1899   //       i = h;
1900   //     }
1901   //   }
1902   //   // R: a[i] <= key < a[i+1] or Q
1903   //   // (i.e., if key is within array, i is the correct index)
1904   //   return i;
1905   // }
1906
1907   // register allocation
1908   assert(Otos_i == O0, "alias checking");
1909   const Register Rkey = Otos_i; // already set (tosca)
1910   const Register Rarray = O1;
1911   const Register Ri = O2;
1912   const Register Rj = O3;
1913   const Register Rh = O4;
1914   const Register Rscratch = O5;
1915
1916   const int log_entry_size = 3;
1917   const int entry_size = 1 << log_entry_size;
1918
1919   Label found;
1920   // Find array start
1921   __ add(Lbcp, 3 * BytesPerInt, Rarray);
1922   __ and3(Rarray, -BytesPerInt, Rarray);
1923   // initialize i (j is initialized in the branch delay slot below)
1924   __ clr( Ri );
1925
1926   // and start
1927   Label entry;
1928   __ ba(entry);
1929   __ delayed()->ld( Rarray, -BytesPerInt, Rj);
1930   // (Rj is already in the native byte-ordering.)
1931
1932   // binary search loop
1933   { Label loop;
1934     __ bind( loop );
1935     // int h = (i + j) >> 1;
1936     __ sra( Rh, 1, Rh );
1937     // if (key < array[h].fast_match()) {
1938     //   j = h;
1939     // } else {
1940     //   i = h;
1941     // }
1942     __ sll( Rh, log_entry_size, Rscratch );
1943     __ ld( Rarray, Rscratch, Rscratch );
1944     // (Rscratch is already in the native byte-ordering.)
1945     __ cmp( Rkey, Rscratch );
1946     __ movcc( Assembler::less, false, Assembler::icc, Rh, Rj ); // j = h if (key < array[h].fast_match())
1947     __ movcc( Assembler::greaterEqual, false, Assembler::icc, Rh, Ri ); // i = h if (key >= array[h].fast_match())
1948
1949     // while (i+1 < j)
1950     __ bind( entry );
1951     __ add( Ri, 1, Rscratch );
1952     __ cmp(Rscratch, Rj);
1953     __ br( Assembler::less, true, Assembler::pt, loop );
1954     __ delayed()->add( Ri, Rj, Rh ); // start h = (i + j) >> 1 (shift happens at the loop head)
1955   }
1956
1957   // end of binary search, result index is i (must check again!)
1958   Label default_case;
1959   Label continue_execution;
1960   if (ProfileInterpreter) {
1961     __ mov( Ri, Rh ); // Save index in i for profiling
1962   }
1963   __ sll( Ri, log_entry_size, Ri );
1964   __ ld( Rarray, Ri, Rscratch );
1965   // (Rscratch is already in the native byte-ordering.)
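  // The loop only establishes a[i] <= key (or Q), so the match word must be
  // re-checked; in effect (a sketch in the style of the algorithm above):
  //   if (array[i].match == key) offset = array[i].offset;
  //   else                       offset = default_offset;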
1966   __ cmp( Rkey, Rscratch );
1967   __ br( Assembler::notEqual, true, Assembler::pn, default_case );
1968   __ delayed()->ld( Rarray, -2 * BytesPerInt, Rj ); // load default offset -> j
1969
1970   // entry found -> j = offset
1971   __ inc( Ri, BytesPerInt );
1972   __ profile_switch_case(Rh, Rj, Rscratch, Rkey);
1973   __ ld( Rarray, Ri, Rj );
1974   // (Rj is already in the native byte-ordering.)
1975
1976   if (ProfileInterpreter) {
1977     __ ba_short(continue_execution);
1978   }
1979
1980   __ bind(default_case); // fall through (if not profiling)
1981   __ profile_switch_default(Ri);
1982
1983   __ bind(continue_execution);
1984   __ add( Lbcp, Rj, Lbcp );
1985   __ dispatch_next( vtos );
1986 }
1987
1988
1989 void TemplateTable::_return(TosState state) {
1990   transition(state, state);
1991   assert(_desc->calls_vm(), "inconsistent calls_vm information");
1992
1993   if (_desc->bytecode() == Bytecodes::_return_register_finalizer) {
1994     assert(state == vtos, "only valid state");
1995     __ mov(G0, G3_scratch);
1996     __ access_local_ptr(G3_scratch, Otos_i);
1997     __ load_klass(Otos_i, O2);
1998     __ set(JVM_ACC_HAS_FINALIZER, G3);
1999     __ ld(O2, in_bytes(Klass::access_flags_offset()), O2);
2000     __ andcc(G3, O2, G0);
2001     Label skip_register_finalizer;
2002     __ br(Assembler::zero, false, Assembler::pn, skip_register_finalizer);
2003     __ delayed()->nop();
2004
2005     // Call out to do finalizer registration
2006     __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::register_finalizer), Otos_i);
2007
2008     __ bind(skip_register_finalizer);
2009   }
2010
2011   __ remove_activation(state, /* throw_monitor_exception */ true);
2012
2013   // The caller's SP was adjusted upon method entry to accommodate
2014   // the callee's non-argument locals. Undo that adjustment.
2015   __ ret(); // return to caller
2016   __ delayed()->restore(I5_savedSP, G0, SP);
2017 }
2018
2019
2020 // ----------------------------------------------------------------------------
2021 // Volatile variables demand their effects be made known to all CPUs in
2022 // order. Store buffers on most chips allow reads & writes to reorder; the
2023 // JMM's ReadAfterWrite.java test fails in -Xint mode without some kind of
2024 // memory barrier (i.e., it's not sufficient that the interpreter does not
2025 // reorder volatile references, the hardware also must not reorder them).
2026 //
2027 // According to the new Java Memory Model (JMM):
2028 // (1) All volatiles are serialized with respect to each other.
2029 // ALSO reads & writes act as acquire & release, so:
2030 // (2) A read cannot let unrelated NON-volatile memory refs that happen after
2031 // the read float up to before the read. It's OK for non-volatile memory refs
2032 // that happen before the volatile read to float down below it.
2033 // (3) Similarly, a volatile write cannot let unrelated NON-volatile memory refs
2034 // that happen BEFORE the write float down to after the write. It's OK for
2035 // non-volatile memory refs that happen after the volatile write to float up
2036 // before it.
2037 //
2038 // We only put in barriers around volatile refs (they are expensive), not
2039 // _between_ memory refs (that would require us to track the flavor of the
2040 // previous memory refs). Requirements (2) and (3) require some barriers
2041 // before volatile stores and after volatile loads. These nearly cover
2042 // requirement (1) but miss the volatile-store-volatile-load case. This final
2043 // case is placed after volatile-stores although it could just as well go
2044 // before volatile-loads.
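//
// In pseudocode, the resulting placement is (a sketch of the scheme, not
// emitted code; the masks are the Assembler::Membar_mask_bits used below):
//
//   volatile load:   load value; membar(LoadLoad | LoadStore);
//   volatile store:  membar(LoadStore | StoreStore); store value;
//                    membar(StoreLoad);
//
// Since all current SPARC implementations run in TSO, every barrier except
// the trailing StoreLoad is a no-op and gets filtered out (see
// volatile_barrier() and membar_has_effect() below).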
2045 void TemplateTable::volatile_barrier(Assembler::Membar_mask_bits order_constraint) {
2046   // Helper function to insert a memory barrier; callers guard it with an is-volatile test.
2047   // All current SPARC implementations run in TSO, needing only StoreLoad.
2048   if ((order_constraint & Assembler::StoreLoad) == 0) return;
2049   __ membar( order_constraint );
2050 }
2051
2052 // ----------------------------------------------------------------------------
2053 void TemplateTable::resolve_cache_and_index(int byte_no,
2054                                             Register Rcache,
2055                                             Register index,
2056                                             size_t index_size) {
2057   // Depends on ConstantPoolCacheEntry layout!
2058
2059   Label resolved;
2060   Bytecodes::Code code = bytecode();
2061   switch (code) {
2062   case Bytecodes::_nofast_getfield: code = Bytecodes::_getfield; break;
2063   case Bytecodes::_nofast_putfield: code = Bytecodes::_putfield; break;
2064   }
2065
2066   assert(byte_no == f1_byte || byte_no == f2_byte, "byte_no out of range");
2067   __ get_cache_and_index_and_bytecode_at_bcp(Rcache, index, Lbyte_code, byte_no, 1, index_size);
2068   __ cmp(Lbyte_code, code); // have we resolved this bytecode?
2069   __ br(Assembler::equal, false, Assembler::pt, resolved);
2070   __ delayed()->set(code, O1);
2071
2072   address entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_from_cache);
2073   // first time invocation - must resolve first
2074   __ call_VM(noreg, entry, O1);
2075   // Update registers with resolved info
2076   __ get_cache_and_index_at_bcp(Rcache, index, 1, index_size);
2077   __ bind(resolved);
2078 }
2079
2080 void TemplateTable::load_invoke_cp_cache_entry(int byte_no,
2081                                                Register method,
2082                                                Register itable_index,
2083                                                Register flags,
2084                                                bool is_invokevirtual,
2085                                                bool is_invokevfinal,
2086                                                bool is_invokedynamic) {
2087   // Uses both G3_scratch and G4_scratch
2088   Register cache = G3_scratch;
2089   Register index = G4_scratch;
2090   assert_different_registers(cache, method, itable_index);
2091
2092   // determine constant pool cache field offsets
2093   assert(is_invokevirtual == (byte_no == f2_byte), "is_invokevirtual flag redundant");
2094   const int method_offset = in_bytes(
2095     ConstantPoolCache::base_offset() +
2096       ((byte_no == f2_byte)
2097        ? ConstantPoolCacheEntry::f2_offset()
2098        : ConstantPoolCacheEntry::f1_offset()
2099       )
2100     );
2101   const int flags_offset = in_bytes(ConstantPoolCache::base_offset() +
2102                                     ConstantPoolCacheEntry::flags_offset());
2103   // access constant pool cache fields
2104   const int index_offset = in_bytes(ConstantPoolCache::base_offset() +
2105                                     ConstantPoolCacheEntry::f2_offset());
2106
2107   if (is_invokevfinal) {
2108     __ get_cache_and_index_at_bcp(cache, index, 1);
2109     __ ld_ptr(Address(cache, method_offset), method);
2110   } else {
2111     size_t index_size = (is_invokedynamic ?
sizeof(u4) : sizeof(u2)); 2112 resolve_cache_and_index(byte_no, cache, index, index_size); 2113 __ ld_ptr(Address(cache, method_offset), method); 2114 } 2115 2116 if (itable_index != noreg) { 2117 // pick up itable or appendix index from f2 also: 2118 __ ld_ptr(Address(cache, index_offset), itable_index); 2119 } 2120 __ ld_ptr(Address(cache, flags_offset), flags); 2121 } 2122 2123 // The Rcache register must be set before call 2124 void TemplateTable::load_field_cp_cache_entry(Register Robj, 2125 Register Rcache, 2126 Register index, 2127 Register Roffset, 2128 Register Rflags, 2129 bool is_static) { 2130 assert_different_registers(Rcache, Rflags, Roffset); 2131 2132 ByteSize cp_base_offset = ConstantPoolCache::base_offset(); 2133 2134 __ ld_ptr(Rcache, cp_base_offset + ConstantPoolCacheEntry::flags_offset(), Rflags); 2135 __ ld_ptr(Rcache, cp_base_offset + ConstantPoolCacheEntry::f2_offset(), Roffset); 2136 if (is_static) { 2137 __ ld_ptr(Rcache, cp_base_offset + ConstantPoolCacheEntry::f1_offset(), Robj); 2138 const int mirror_offset = in_bytes(Klass::java_mirror_offset()); 2139 __ ld_ptr( Robj, mirror_offset, Robj); 2140 } 2141 } 2142 2143 // The registers Rcache and index expected to be set before call. 2144 // Correct values of the Rcache and index registers are preserved. 2145 void TemplateTable::jvmti_post_field_access(Register Rcache, 2146 Register index, 2147 bool is_static, 2148 bool has_tos) { 2149 ByteSize cp_base_offset = ConstantPoolCache::base_offset(); 2150 2151 if (JvmtiExport::can_post_field_access()) { 2152 // Check to see if a field access watch has been set before we take 2153 // the time to call into the VM. 2154 Label Label1; 2155 assert_different_registers(Rcache, index, G1_scratch); 2156 AddressLiteral get_field_access_count_addr(JvmtiExport::get_field_access_count_addr()); 2157 __ load_contents(get_field_access_count_addr, G1_scratch); 2158 __ cmp_and_br_short(G1_scratch, 0, Assembler::equal, Assembler::pt, Label1); 2159 2160 __ add(Rcache, in_bytes(cp_base_offset), Rcache); 2161 2162 if (is_static) { 2163 __ clr(Otos_i); 2164 } else { 2165 if (has_tos) { 2166 // save object pointer before call_VM() clobbers it 2167 __ push_ptr(Otos_i); // put object on tos where GC wants it. 
2168 } else { 2169 // Load top of stack (do not pop the value off the stack); 2170 __ ld_ptr(Lesp, Interpreter::expr_offset_in_bytes(0), Otos_i); 2171 } 2172 __ verify_oop(Otos_i); 2173 } 2174 // Otos_i: object pointer or NULL if static 2175 // Rcache: cache entry pointer 2176 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_field_access), 2177 Otos_i, Rcache); 2178 if (!is_static && has_tos) { 2179 __ pop_ptr(Otos_i); // restore object pointer 2180 __ verify_oop(Otos_i); 2181 } 2182 __ get_cache_and_index_at_bcp(Rcache, index, 1); 2183 __ bind(Label1); 2184 } 2185 } 2186 2187 void TemplateTable::getfield_or_static(int byte_no, bool is_static, RewriteControl rc) { 2188 transition(vtos, vtos); 2189 2190 Register Rcache = G3_scratch; 2191 Register index = G4_scratch; 2192 Register Rclass = Rcache; 2193 Register Roffset= G4_scratch; 2194 Register Rflags = G1_scratch; 2195 ByteSize cp_base_offset = ConstantPoolCache::base_offset(); 2196 2197 resolve_cache_and_index(byte_no, Rcache, index, sizeof(u2)); 2198 jvmti_post_field_access(Rcache, index, is_static, false); 2199 load_field_cp_cache_entry(Rclass, Rcache, index, Roffset, Rflags, is_static); 2200 2201 if (!is_static) { 2202 pop_and_check_object(Rclass); 2203 } else { 2204 __ verify_oop(Rclass); 2205 } 2206 2207 Label exit; 2208 2209 Assembler::Membar_mask_bits membar_bits = 2210 Assembler::Membar_mask_bits(Assembler::LoadLoad | Assembler::LoadStore); 2211 2212 if (__ membar_has_effect(membar_bits)) { 2213 // Get volatile flag 2214 __ set((1 << ConstantPoolCacheEntry::is_volatile_shift), Lscratch); 2215 __ and3(Rflags, Lscratch, Lscratch); 2216 } 2217 2218 Label checkVolatile; 2219 2220 // compute field type 2221 Label notByte, notInt, notShort, notChar, notLong, notFloat, notObj; 2222 __ srl(Rflags, ConstantPoolCacheEntry::tos_state_shift, Rflags); 2223 // Make sure we don't need to mask Rflags after the above shift 2224 ConstantPoolCacheEntry::verify_tos_state_shift(); 2225 2226 // Check atos before itos for getstatic, more likely (in Queens at least) 2227 __ cmp(Rflags, atos); 2228 __ br(Assembler::notEqual, false, Assembler::pt, notObj); 2229 __ delayed() ->cmp(Rflags, itos); 2230 2231 // atos 2232 __ load_heap_oop(Rclass, Roffset, Otos_i); 2233 __ verify_oop(Otos_i); 2234 __ push(atos); 2235 if (!is_static && rc == may_rewrite) { 2236 patch_bytecode(Bytecodes::_fast_agetfield, G3_scratch, G4_scratch); 2237 } 2238 __ ba(checkVolatile); 2239 __ delayed()->tst(Lscratch); 2240 2241 __ bind(notObj); 2242 2243 // cmp(Rflags, itos); 2244 __ br(Assembler::notEqual, false, Assembler::pt, notInt); 2245 __ delayed() ->cmp(Rflags, ltos); 2246 2247 // itos 2248 __ ld(Rclass, Roffset, Otos_i); 2249 __ push(itos); 2250 if (!is_static && rc == may_rewrite) { 2251 patch_bytecode(Bytecodes::_fast_igetfield, G3_scratch, G4_scratch); 2252 } 2253 __ ba(checkVolatile); 2254 __ delayed()->tst(Lscratch); 2255 2256 __ bind(notInt); 2257 2258 // cmp(Rflags, ltos); 2259 __ br(Assembler::notEqual, false, Assembler::pt, notLong); 2260 __ delayed() ->cmp(Rflags, btos); 2261 2262 // ltos 2263 // load must be atomic 2264 __ ld_long(Rclass, Roffset, Otos_l); 2265 __ push(ltos); 2266 if (!is_static && rc == may_rewrite) { 2267 patch_bytecode(Bytecodes::_fast_lgetfield, G3_scratch, G4_scratch); 2268 } 2269 __ ba(checkVolatile); 2270 __ delayed()->tst(Lscratch); 2271 2272 __ bind(notLong); 2273 2274 // cmp(Rflags, btos); 2275 __ br(Assembler::notEqual, false, Assembler::pt, notByte); 2276 __ delayed() ->cmp(Rflags, ctos); 2277 2278 // btos 2279 __ 
ldsb(Rclass, Roffset, Otos_i); 2280 __ push(itos); 2281 if (!is_static && rc == may_rewrite) { 2282 patch_bytecode(Bytecodes::_fast_bgetfield, G3_scratch, G4_scratch); 2283 } 2284 __ ba(checkVolatile); 2285 __ delayed()->tst(Lscratch); 2286 2287 __ bind(notByte); 2288 2289 // cmp(Rflags, ctos); 2290 __ br(Assembler::notEqual, false, Assembler::pt, notChar); 2291 __ delayed() ->cmp(Rflags, stos); 2292 2293 // ctos 2294 __ lduh(Rclass, Roffset, Otos_i); 2295 __ push(itos); 2296 if (!is_static && rc == may_rewrite) { 2297 patch_bytecode(Bytecodes::_fast_cgetfield, G3_scratch, G4_scratch); 2298 } 2299 __ ba(checkVolatile); 2300 __ delayed()->tst(Lscratch); 2301 2302 __ bind(notChar); 2303 2304 // cmp(Rflags, stos); 2305 __ br(Assembler::notEqual, false, Assembler::pt, notShort); 2306 __ delayed() ->cmp(Rflags, ftos); 2307 2308 // stos 2309 __ ldsh(Rclass, Roffset, Otos_i); 2310 __ push(itos); 2311 if (!is_static && rc == may_rewrite) { 2312 patch_bytecode(Bytecodes::_fast_sgetfield, G3_scratch, G4_scratch); 2313 } 2314 __ ba(checkVolatile); 2315 __ delayed()->tst(Lscratch); 2316 2317 __ bind(notShort); 2318 2319 2320 // cmp(Rflags, ftos); 2321 __ br(Assembler::notEqual, false, Assembler::pt, notFloat); 2322 __ delayed() ->tst(Lscratch); 2323 2324 // ftos 2325 __ ldf(FloatRegisterImpl::S, Rclass, Roffset, Ftos_f); 2326 __ push(ftos); 2327 if (!is_static && rc == may_rewrite) { 2328 patch_bytecode(Bytecodes::_fast_fgetfield, G3_scratch, G4_scratch); 2329 } 2330 __ ba(checkVolatile); 2331 __ delayed()->tst(Lscratch); 2332 2333 __ bind(notFloat); 2334 2335 2336 // dtos 2337 __ ldf(FloatRegisterImpl::D, Rclass, Roffset, Ftos_d); 2338 __ push(dtos); 2339 if (!is_static && rc == may_rewrite) { 2340 patch_bytecode(Bytecodes::_fast_dgetfield, G3_scratch, G4_scratch); 2341 } 2342 2343 __ bind(checkVolatile); 2344 if (__ membar_has_effect(membar_bits)) { 2345 // __ tst(Lscratch); executed in delay slot 2346 __ br(Assembler::zero, false, Assembler::pt, exit); 2347 __ delayed()->nop(); 2348 volatile_barrier(membar_bits); 2349 } 2350 2351 __ bind(exit); 2352 } 2353 2354 void TemplateTable::getfield(int byte_no) { 2355 getfield_or_static(byte_no, false); 2356 } 2357 2358 void TemplateTable::nofast_getfield(int byte_no) { 2359 getfield_or_static(byte_no, false, may_not_rewrite); 2360 } 2361 2362 void TemplateTable::getstatic(int byte_no) { 2363 getfield_or_static(byte_no, true); 2364 } 2365 2366 void TemplateTable::fast_accessfield(TosState state) { 2367 transition(atos, state); 2368 Register Rcache = G3_scratch; 2369 Register index = G4_scratch; 2370 Register Roffset = G4_scratch; 2371 Register Rflags = Rcache; 2372 ByteSize cp_base_offset = ConstantPoolCache::base_offset(); 2373 2374 __ get_cache_and_index_at_bcp(Rcache, index, 1); 2375 jvmti_post_field_access(Rcache, index, /*is_static*/false, /*has_tos*/true); 2376 2377 __ ld_ptr(Rcache, cp_base_offset + ConstantPoolCacheEntry::f2_offset(), Roffset); 2378 2379 __ null_check(Otos_i); 2380 __ verify_oop(Otos_i); 2381 2382 Label exit; 2383 2384 Assembler::Membar_mask_bits membar_bits = 2385 Assembler::Membar_mask_bits(Assembler::LoadLoad | Assembler::LoadStore); 2386 if (__ membar_has_effect(membar_bits)) { 2387 // Get volatile flag 2388 __ ld_ptr(Rcache, cp_base_offset + ConstantPoolCacheEntry::f2_offset(), Rflags); 2389 __ set((1 << ConstantPoolCacheEntry::is_volatile_shift), Lscratch); 2390 } 2391 2392 switch (bytecode()) { 2393 case Bytecodes::_fast_bgetfield: 2394 __ ldsb(Otos_i, Roffset, Otos_i); 2395 break; 2396 case Bytecodes::_fast_cgetfield: 2397 
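    // (char is an unsigned 16-bit value, hence lduh, load unsigned halfword;
    //  _fast_sgetfield below uses the sign-extending ldsh instead.)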
__ lduh(Otos_i, Roffset, Otos_i); 2398 break; 2399 case Bytecodes::_fast_sgetfield: 2400 __ ldsh(Otos_i, Roffset, Otos_i); 2401 break; 2402 case Bytecodes::_fast_igetfield: 2403 __ ld(Otos_i, Roffset, Otos_i); 2404 break; 2405 case Bytecodes::_fast_lgetfield: 2406 __ ld_long(Otos_i, Roffset, Otos_l); 2407 break; 2408 case Bytecodes::_fast_fgetfield: 2409 __ ldf(FloatRegisterImpl::S, Otos_i, Roffset, Ftos_f); 2410 break; 2411 case Bytecodes::_fast_dgetfield: 2412 __ ldf(FloatRegisterImpl::D, Otos_i, Roffset, Ftos_d); 2413 break; 2414 case Bytecodes::_fast_agetfield: 2415 __ load_heap_oop(Otos_i, Roffset, Otos_i); 2416 break; 2417 default: 2418 ShouldNotReachHere(); 2419 } 2420 2421 if (__ membar_has_effect(membar_bits)) { 2422 __ btst(Lscratch, Rflags); 2423 __ br(Assembler::zero, false, Assembler::pt, exit); 2424 __ delayed()->nop(); 2425 volatile_barrier(membar_bits); 2426 __ bind(exit); 2427 } 2428 2429 if (state == atos) { 2430 __ verify_oop(Otos_i); // does not blow flags! 2431 } 2432 } 2433 2434 void TemplateTable::jvmti_post_fast_field_mod() { 2435 if (JvmtiExport::can_post_field_modification()) { 2436 // Check to see if a field modification watch has been set before we take 2437 // the time to call into the VM. 2438 Label done; 2439 AddressLiteral get_field_modification_count_addr(JvmtiExport::get_field_modification_count_addr()); 2440 __ load_contents(get_field_modification_count_addr, G4_scratch); 2441 __ cmp_and_br_short(G4_scratch, 0, Assembler::equal, Assembler::pt, done); 2442 __ pop_ptr(G4_scratch); // copy the object pointer from tos 2443 __ verify_oop(G4_scratch); 2444 __ push_ptr(G4_scratch); // put the object pointer back on tos 2445 __ get_cache_entry_pointer_at_bcp(G1_scratch, G3_scratch, 1); 2446 // Save tos values before call_VM() clobbers them. Since we have 2447 // to do it for every data type, we use the saved values as the 2448 // jvalue object. 2449 switch (bytecode()) { // save tos values before call_VM() clobbers them 2450 case Bytecodes::_fast_aputfield: __ push_ptr(Otos_i); break; 2451 case Bytecodes::_fast_bputfield: // fall through 2452 case Bytecodes::_fast_sputfield: // fall through 2453 case Bytecodes::_fast_cputfield: // fall through 2454 case Bytecodes::_fast_iputfield: __ push_i(Otos_i); break; 2455 case Bytecodes::_fast_dputfield: __ push_d(Ftos_d); break; 2456 case Bytecodes::_fast_fputfield: __ push_f(Ftos_f); break; 2457 // get words in right order for use as jvalue object 2458 case Bytecodes::_fast_lputfield: __ push_l(Otos_l); break; 2459 } 2460 // setup pointer to jvalue object 2461 __ mov(Lesp, G3_scratch); __ inc(G3_scratch, wordSize); 2462 // G4_scratch: object pointer 2463 // G1_scratch: cache entry pointer 2464 // G3_scratch: jvalue object on the stack 2465 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_field_modification), G4_scratch, G1_scratch, G3_scratch); 2466 switch (bytecode()) { // restore tos values 2467 case Bytecodes::_fast_aputfield: __ pop_ptr(Otos_i); break; 2468 case Bytecodes::_fast_bputfield: // fall through 2469 case Bytecodes::_fast_sputfield: // fall through 2470 case Bytecodes::_fast_cputfield: // fall through 2471 case Bytecodes::_fast_iputfield: __ pop_i(Otos_i); break; 2472 case Bytecodes::_fast_dputfield: __ pop_d(Ftos_d); break; 2473 case Bytecodes::_fast_fputfield: __ pop_f(Ftos_f); break; 2474 case Bytecodes::_fast_lputfield: __ pop_l(Otos_l); break; 2475 } 2476 __ bind(done); 2477 } 2478 } 2479 2480 // The registers Rcache and index expected to be set before call. 
2481 // The function may destroy various registers, just not the Rcache and index registers.
2482 void TemplateTable::jvmti_post_field_mod(Register Rcache, Register index, bool is_static) {
2483   ByteSize cp_base_offset = ConstantPoolCache::base_offset();
2484
2485   if (JvmtiExport::can_post_field_modification()) {
2486     // Check to see if a field modification watch has been set before we take
2487     // the time to call into the VM.
2488     Label Label1;
2489     assert_different_registers(Rcache, index, G1_scratch);
2490     AddressLiteral get_field_modification_count_addr(JvmtiExport::get_field_modification_count_addr());
2491     __ load_contents(get_field_modification_count_addr, G1_scratch);
2492     __ cmp_and_br_short(G1_scratch, 0, Assembler::zero, Assembler::pt, Label1);
2493
2494     // The Rcache and index registers have already been set. That would
2495     // allow us to eliminate this call, but the Rcache and index registers
2496     // would then have to be used consistently after this point.
2497     __ get_cache_and_index_at_bcp(G1_scratch, G4_scratch, 1);
2498
2499     __ add(G1_scratch, in_bytes(cp_base_offset), G3_scratch);
2500     if (is_static) {
2501       // Life is simple. Null out the object pointer.
2502       __ clr(G4_scratch);
2503     } else {
2504       Register Rflags = G1_scratch;
2505       // Life is harder. The stack holds the value on top, followed by the
2506       // object. We don't know the size of the value, though; it could be
2507       // one or two words depending on its type. As a result, we must find
2508       // the type to determine where the object is.
2509
2510       Label two_word, valsizeknown;
2511       __ ld_ptr(G1_scratch, cp_base_offset + ConstantPoolCacheEntry::flags_offset(), Rflags);
2512       __ mov(Lesp, G4_scratch);
2513       __ srl(Rflags, ConstantPoolCacheEntry::tos_state_shift, Rflags);
2514       // Make sure we don't need to mask Rflags after the above shift
2515       ConstantPoolCacheEntry::verify_tos_state_shift();
2516       __ cmp(Rflags, ltos);
2517       __ br(Assembler::equal, false, Assembler::pt, two_word);
2518       __ delayed()->cmp(Rflags, dtos);
2519       __ br(Assembler::equal, false, Assembler::pt, two_word);
2520       __ delayed()->nop();
2521       __ inc(G4_scratch, Interpreter::expr_offset_in_bytes(1));
2522       __ ba_short(valsizeknown);
2523       __ bind(two_word);
2524
2525       __ inc(G4_scratch, Interpreter::expr_offset_in_bytes(2));
2526
2527       __ bind(valsizeknown);
2528       // setup object pointer
2529       __ ld_ptr(G4_scratch, 0, G4_scratch);
2530       __ verify_oop(G4_scratch);
2531     }
2532     // setup pointer to jvalue object
2533     __ mov(Lesp, G1_scratch); __ inc(G1_scratch, wordSize);
2534     // G4_scratch: object pointer or NULL if static
2535     // G3_scratch: cache entry pointer
2536     // G1_scratch: jvalue object on the stack
2537     __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_field_modification),
2538                G4_scratch, G3_scratch, G1_scratch);
2539     __ get_cache_and_index_at_bcp(Rcache, index, 1);
2540     __ bind(Label1);
2541   }
2542 }
2543
2544 void TemplateTable::pop_and_check_object(Register r) {
2545   __ pop_ptr(r);
2546   __ null_check(r); // for field access must check obj.
2547 __ verify_oop(r); 2548 } 2549 2550 void TemplateTable::putfield_or_static(int byte_no, bool is_static, RewriteControl rc) { 2551 transition(vtos, vtos); 2552 Register Rcache = G3_scratch; 2553 Register index = G4_scratch; 2554 Register Rclass = Rcache; 2555 Register Roffset= G4_scratch; 2556 Register Rflags = G1_scratch; 2557 ByteSize cp_base_offset = ConstantPoolCache::base_offset(); 2558 2559 resolve_cache_and_index(byte_no, Rcache, index, sizeof(u2)); 2560 jvmti_post_field_mod(Rcache, index, is_static); 2561 load_field_cp_cache_entry(Rclass, Rcache, index, Roffset, Rflags, is_static); 2562 2563 Assembler::Membar_mask_bits read_bits = 2564 Assembler::Membar_mask_bits(Assembler::LoadStore | Assembler::StoreStore); 2565 Assembler::Membar_mask_bits write_bits = Assembler::StoreLoad; 2566 2567 Label notVolatile, checkVolatile, exit; 2568 if (__ membar_has_effect(read_bits) || __ membar_has_effect(write_bits)) { 2569 __ set((1 << ConstantPoolCacheEntry::is_volatile_shift), Lscratch); 2570 __ and3(Rflags, Lscratch, Lscratch); 2571 2572 if (__ membar_has_effect(read_bits)) { 2573 __ cmp_and_br_short(Lscratch, 0, Assembler::equal, Assembler::pt, notVolatile); 2574 volatile_barrier(read_bits); 2575 __ bind(notVolatile); 2576 } 2577 } 2578 2579 __ srl(Rflags, ConstantPoolCacheEntry::tos_state_shift, Rflags); 2580 // Make sure we don't need to mask Rflags after the above shift 2581 ConstantPoolCacheEntry::verify_tos_state_shift(); 2582 2583 // compute field type 2584 Label notInt, notShort, notChar, notObj, notByte, notLong, notFloat; 2585 2586 if (is_static) { 2587 // putstatic with object type most likely, check that first 2588 __ cmp(Rflags, atos); 2589 __ br(Assembler::notEqual, false, Assembler::pt, notObj); 2590 __ delayed()->cmp(Rflags, itos); 2591 2592 // atos 2593 { 2594 __ pop_ptr(); 2595 __ verify_oop(Otos_i); 2596 do_oop_store(_masm, Rclass, Roffset, 0, Otos_i, G1_scratch, _bs->kind(), false); 2597 __ ba(checkVolatile); 2598 __ delayed()->tst(Lscratch); 2599 } 2600 2601 __ bind(notObj); 2602 // cmp(Rflags, itos); 2603 __ br(Assembler::notEqual, false, Assembler::pt, notInt); 2604 __ delayed()->cmp(Rflags, btos); 2605 2606 // itos 2607 { 2608 __ pop_i(); 2609 __ st(Otos_i, Rclass, Roffset); 2610 __ ba(checkVolatile); 2611 __ delayed()->tst(Lscratch); 2612 } 2613 2614 __ bind(notInt); 2615 } else { 2616 // putfield with int type most likely, check that first 2617 __ cmp(Rflags, itos); 2618 __ br(Assembler::notEqual, false, Assembler::pt, notInt); 2619 __ delayed()->cmp(Rflags, atos); 2620 2621 // itos 2622 { 2623 __ pop_i(); 2624 pop_and_check_object(Rclass); 2625 __ st(Otos_i, Rclass, Roffset); 2626 if (rc == may_rewrite) patch_bytecode(Bytecodes::_fast_iputfield, G3_scratch, G4_scratch, true, byte_no); 2627 __ ba(checkVolatile); 2628 __ delayed()->tst(Lscratch); 2629 } 2630 2631 __ bind(notInt); 2632 // cmp(Rflags, atos); 2633 __ br(Assembler::notEqual, false, Assembler::pt, notObj); 2634 __ delayed()->cmp(Rflags, btos); 2635 2636 // atos 2637 { 2638 __ pop_ptr(); 2639 pop_and_check_object(Rclass); 2640 __ verify_oop(Otos_i); 2641 do_oop_store(_masm, Rclass, Roffset, 0, Otos_i, G1_scratch, _bs->kind(), false); 2642 if (rc == may_rewrite) patch_bytecode(Bytecodes::_fast_aputfield, G3_scratch, G4_scratch, true, byte_no); 2643 __ ba(checkVolatile); 2644 __ delayed()->tst(Lscratch); 2645 } 2646 2647 __ bind(notObj); 2648 } 2649 2650 // cmp(Rflags, btos); 2651 __ br(Assembler::notEqual, false, Assembler::pt, notByte); 2652 __ delayed()->cmp(Rflags, ltos); 2653 2654 // btos 2655 { 2656 
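    // The byte is popped as a full int; stb below stores only its low 8 bits.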
__ pop_i(); 2657 if (!is_static) pop_and_check_object(Rclass); 2658 __ stb(Otos_i, Rclass, Roffset); 2659 if (!is_static && rc == may_rewrite) { 2660 patch_bytecode(Bytecodes::_fast_bputfield, G3_scratch, G4_scratch, true, byte_no); 2661 } 2662 __ ba(checkVolatile); 2663 __ delayed()->tst(Lscratch); 2664 } 2665 2666 __ bind(notByte); 2667 // cmp(Rflags, ltos); 2668 __ br(Assembler::notEqual, false, Assembler::pt, notLong); 2669 __ delayed()->cmp(Rflags, ctos); 2670 2671 // ltos 2672 { 2673 __ pop_l(); 2674 if (!is_static) pop_and_check_object(Rclass); 2675 __ st_long(Otos_l, Rclass, Roffset); 2676 if (!is_static && rc == may_rewrite) { 2677 patch_bytecode(Bytecodes::_fast_lputfield, G3_scratch, G4_scratch, true, byte_no); 2678 } 2679 __ ba(checkVolatile); 2680 __ delayed()->tst(Lscratch); 2681 } 2682 2683 __ bind(notLong); 2684 // cmp(Rflags, ctos); 2685 __ br(Assembler::notEqual, false, Assembler::pt, notChar); 2686 __ delayed()->cmp(Rflags, stos); 2687 2688 // ctos (char) 2689 { 2690 __ pop_i(); 2691 if (!is_static) pop_and_check_object(Rclass); 2692 __ sth(Otos_i, Rclass, Roffset); 2693 if (!is_static && rc == may_rewrite) { 2694 patch_bytecode(Bytecodes::_fast_cputfield, G3_scratch, G4_scratch, true, byte_no); 2695 } 2696 __ ba(checkVolatile); 2697 __ delayed()->tst(Lscratch); 2698 } 2699 2700 __ bind(notChar); 2701 // cmp(Rflags, stos); 2702 __ br(Assembler::notEqual, false, Assembler::pt, notShort); 2703 __ delayed()->cmp(Rflags, ftos); 2704 2705 // stos (short) 2706 { 2707 __ pop_i(); 2708 if (!is_static) pop_and_check_object(Rclass); 2709 __ sth(Otos_i, Rclass, Roffset); 2710 if (!is_static && rc == may_rewrite) { 2711 patch_bytecode(Bytecodes::_fast_sputfield, G3_scratch, G4_scratch, true, byte_no); 2712 } 2713 __ ba(checkVolatile); 2714 __ delayed()->tst(Lscratch); 2715 } 2716 2717 __ bind(notShort); 2718 // cmp(Rflags, ftos); 2719 __ br(Assembler::notZero, false, Assembler::pt, notFloat); 2720 __ delayed()->nop(); 2721 2722 // ftos 2723 { 2724 __ pop_f(); 2725 if (!is_static) pop_and_check_object(Rclass); 2726 __ stf(FloatRegisterImpl::S, Ftos_f, Rclass, Roffset); 2727 if (!is_static && rc == may_rewrite) { 2728 patch_bytecode(Bytecodes::_fast_fputfield, G3_scratch, G4_scratch, true, byte_no); 2729 } 2730 __ ba(checkVolatile); 2731 __ delayed()->tst(Lscratch); 2732 } 2733 2734 __ bind(notFloat); 2735 2736 // dtos 2737 { 2738 __ pop_d(); 2739 if (!is_static) pop_and_check_object(Rclass); 2740 __ stf(FloatRegisterImpl::D, Ftos_d, Rclass, Roffset); 2741 if (!is_static && rc == may_rewrite) { 2742 patch_bytecode(Bytecodes::_fast_dputfield, G3_scratch, G4_scratch, true, byte_no); 2743 } 2744 } 2745 2746 __ bind(checkVolatile); 2747 __ tst(Lscratch); 2748 2749 if (__ membar_has_effect(write_bits)) { 2750 // __ tst(Lscratch); in delay slot 2751 __ br(Assembler::zero, false, Assembler::pt, exit); 2752 __ delayed()->nop(); 2753 volatile_barrier(Assembler::StoreLoad); 2754 __ bind(exit); 2755 } 2756 } 2757 2758 void TemplateTable::fast_storefield(TosState state) { 2759 transition(state, vtos); 2760 Register Rcache = G3_scratch; 2761 Register Rclass = Rcache; 2762 Register Roffset= G4_scratch; 2763 Register Rflags = G1_scratch; 2764 ByteSize cp_base_offset = ConstantPoolCache::base_offset(); 2765 2766 jvmti_post_fast_field_mod(); 2767 2768 __ get_cache_and_index_at_bcp(Rcache, G4_scratch, 1); 2769 2770 Assembler::Membar_mask_bits read_bits = 2771 Assembler::Membar_mask_bits(Assembler::LoadStore | Assembler::StoreStore); 2772 Assembler::Membar_mask_bits write_bits = Assembler::StoreLoad; 
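  // For a volatile store the intended shape is (a sketch, mirroring
  // putfield_or_static above):
  //   membar(LoadStore | StoreStore); store value; membar(StoreLoad);
  // and on TSO only the trailing StoreLoad barrier actually emits code.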
2773 2774 Label notVolatile, checkVolatile, exit; 2775 if (__ membar_has_effect(read_bits) || __ membar_has_effect(write_bits)) { 2776 __ ld_ptr(Rcache, cp_base_offset + ConstantPoolCacheEntry::flags_offset(), Rflags); 2777 __ set((1 << ConstantPoolCacheEntry::is_volatile_shift), Lscratch); 2778 __ and3(Rflags, Lscratch, Lscratch); 2779 if (__ membar_has_effect(read_bits)) { 2780 __ cmp_and_br_short(Lscratch, 0, Assembler::equal, Assembler::pt, notVolatile); 2781 volatile_barrier(read_bits); 2782 __ bind(notVolatile); 2783 } 2784 } 2785 2786 __ ld_ptr(Rcache, cp_base_offset + ConstantPoolCacheEntry::f2_offset(), Roffset); 2787 pop_and_check_object(Rclass); 2788 2789 switch (bytecode()) { 2790 case Bytecodes::_fast_bputfield: __ stb(Otos_i, Rclass, Roffset); break; 2791 case Bytecodes::_fast_cputfield: /* fall through */ 2792 case Bytecodes::_fast_sputfield: __ sth(Otos_i, Rclass, Roffset); break; 2793 case Bytecodes::_fast_iputfield: __ st(Otos_i, Rclass, Roffset); break; 2794 case Bytecodes::_fast_lputfield: __ st_long(Otos_l, Rclass, Roffset); break; 2795 case Bytecodes::_fast_fputfield: 2796 __ stf(FloatRegisterImpl::S, Ftos_f, Rclass, Roffset); 2797 break; 2798 case Bytecodes::_fast_dputfield: 2799 __ stf(FloatRegisterImpl::D, Ftos_d, Rclass, Roffset); 2800 break; 2801 case Bytecodes::_fast_aputfield: 2802 do_oop_store(_masm, Rclass, Roffset, 0, Otos_i, G1_scratch, _bs->kind(), false); 2803 break; 2804 default: 2805 ShouldNotReachHere(); 2806 } 2807 2808 if (__ membar_has_effect(write_bits)) { 2809 __ cmp_and_br_short(Lscratch, 0, Assembler::equal, Assembler::pt, exit); 2810 volatile_barrier(Assembler::StoreLoad); 2811 __ bind(exit); 2812 } 2813 } 2814 2815 void TemplateTable::putfield(int byte_no) { 2816 putfield_or_static(byte_no, false); 2817 } 2818 2819 void TemplateTable::nofast_putfield(int byte_no) { 2820 putfield_or_static(byte_no, false, may_not_rewrite); 2821 } 2822 2823 void TemplateTable::putstatic(int byte_no) { 2824 putfield_or_static(byte_no, true); 2825 } 2826 2827 void TemplateTable::fast_xaccess(TosState state) { 2828 transition(vtos, state); 2829 Register Rcache = G3_scratch; 2830 Register Roffset = G4_scratch; 2831 Register Rflags = G4_scratch; 2832 Register Rreceiver = Lscratch; 2833 2834 __ ld_ptr(Llocals, 0, Rreceiver); 2835 2836 // access constant pool cache (is resolved) 2837 __ get_cache_and_index_at_bcp(Rcache, G4_scratch, 2); 2838 __ ld_ptr(Rcache, ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::f2_offset(), Roffset); 2839 __ add(Lbcp, 1, Lbcp); // needed to report exception at the correct bcp 2840 2841 __ verify_oop(Rreceiver); 2842 __ null_check(Rreceiver); 2843 if (state == atos) { 2844 __ load_heap_oop(Rreceiver, Roffset, Otos_i); 2845 } else if (state == itos) { 2846 __ ld (Rreceiver, Roffset, Otos_i) ; 2847 } else if (state == ftos) { 2848 __ ldf(FloatRegisterImpl::S, Rreceiver, Roffset, Ftos_f); 2849 } else { 2850 ShouldNotReachHere(); 2851 } 2852 2853 Assembler::Membar_mask_bits membar_bits = 2854 Assembler::Membar_mask_bits(Assembler::LoadLoad | Assembler::LoadStore); 2855 if (__ membar_has_effect(membar_bits)) { 2856 2857 // Get is_volatile value in Rflags and check if membar is needed 2858 __ ld_ptr(Rcache, ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::flags_offset(), Rflags); 2859 2860 // Test volatile 2861 Label notVolatile; 2862 __ set((1 << ConstantPoolCacheEntry::is_volatile_shift), Lscratch); 2863 __ btst(Rflags, Lscratch); 2864 __ br(Assembler::zero, false, Assembler::pt, notVolatile); 2865 __ delayed()->nop(); 2866 
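  // Volatile field: the load above must complete before any later memory
  // accesses (LoadLoad | LoadStore), matching the getfield path.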
volatile_barrier(membar_bits); 2867 __ bind(notVolatile); 2868 } 2869 2870 __ interp_verify_oop(Otos_i, state, __FILE__, __LINE__); 2871 __ sub(Lbcp, 1, Lbcp); 2872 } 2873 2874 //---------------------------------------------------------------------------------------------------- 2875 // Calls 2876 2877 void TemplateTable::count_calls(Register method, Register temp) { 2878 // implemented elsewhere 2879 ShouldNotReachHere(); 2880 } 2881 2882 void TemplateTable::prepare_invoke(int byte_no, 2883 Register method, // linked method (or i-klass) 2884 Register ra, // return address 2885 Register index, // itable index, MethodType, etc. 2886 Register recv, // if caller wants to see it 2887 Register flags // if caller wants to test it 2888 ) { 2889 // determine flags 2890 const Bytecodes::Code code = bytecode(); 2891 const bool is_invokeinterface = code == Bytecodes::_invokeinterface; 2892 const bool is_invokedynamic = code == Bytecodes::_invokedynamic; 2893 const bool is_invokehandle = code == Bytecodes::_invokehandle; 2894 const bool is_invokevirtual = code == Bytecodes::_invokevirtual; 2895 const bool is_invokespecial = code == Bytecodes::_invokespecial; 2896 const bool load_receiver = (recv != noreg); 2897 assert(load_receiver == (code != Bytecodes::_invokestatic && code != Bytecodes::_invokedynamic), ""); 2898 assert(recv == noreg || recv == O0, ""); 2899 assert(flags == noreg || flags == O1, ""); 2900 2901 // setup registers & access constant pool cache 2902 if (recv == noreg) recv = O0; 2903 if (flags == noreg) flags = O1; 2904 const Register temp = O2; 2905 assert_different_registers(method, ra, index, recv, flags, temp); 2906 2907 load_invoke_cp_cache_entry(byte_no, method, index, flags, is_invokevirtual, false, is_invokedynamic); 2908 2909 __ mov(SP, O5_savedSP); // record SP that we wanted the callee to restore 2910 2911 // maybe push appendix to arguments 2912 if (is_invokedynamic || is_invokehandle) { 2913 Label L_no_push; 2914 __ set((1 << ConstantPoolCacheEntry::has_appendix_shift), temp); 2915 __ btst(flags, temp); 2916 __ br(Assembler::zero, false, Assembler::pt, L_no_push); 2917 __ delayed()->nop(); 2918 // Push the appendix as a trailing parameter. 2919 // This must be done before we get the receiver, 2920 // since the parameter_size includes it. 2921 assert(ConstantPoolCacheEntry::_indy_resolved_references_appendix_offset == 0, "appendix expected at index+0"); 2922 __ load_resolved_reference_at_index(temp, index); 2923 __ verify_oop(temp); 2924 __ push_ptr(temp); // push appendix (MethodType, CallSite, etc.) 
2925 __ bind(L_no_push); 2926 } 2927 2928 // load receiver if needed (after appendix is pushed so parameter size is correct) 2929 if (load_receiver) { 2930 __ and3(flags, ConstantPoolCacheEntry::parameter_size_mask, temp); // get parameter size 2931 __ load_receiver(temp, recv); // __ argument_address uses Gargs but we need Lesp 2932 __ verify_oop(recv); 2933 } 2934 2935 // compute return type 2936 __ srl(flags, ConstantPoolCacheEntry::tos_state_shift, ra); 2937 // Make sure we don't need to mask flags after the above shift 2938 ConstantPoolCacheEntry::verify_tos_state_shift(); 2939 // load return address 2940 { 2941 const address table_addr = (address) Interpreter::invoke_return_entry_table_for(code); 2942 AddressLiteral table(table_addr); 2943 __ set(table, temp); 2944 __ sll(ra, LogBytesPerWord, ra); 2945 __ ld_ptr(Address(temp, ra), ra); 2946 } 2947 } 2948 2949 2950 void TemplateTable::generate_vtable_call(Register Rrecv, Register Rindex, Register Rret) { 2951 Register Rtemp = G4_scratch; 2952 Register Rcall = Rindex; 2953 assert_different_registers(Rcall, G5_method, Gargs, Rret); 2954 2955 // get target Method* & entry point 2956 __ lookup_virtual_method(Rrecv, Rindex, G5_method); 2957 __ profile_arguments_type(G5_method, Rcall, Gargs, true); 2958 __ profile_called_method(G5_method, Rtemp); 2959 __ call_from_interpreter(Rcall, Gargs, Rret); 2960 } 2961 2962 void TemplateTable::invokevirtual(int byte_no) { 2963 transition(vtos, vtos); 2964 assert(byte_no == f2_byte, "use this argument"); 2965 2966 Register Rscratch = G3_scratch; 2967 Register Rtemp = G4_scratch; 2968 Register Rret = Lscratch; 2969 Register O0_recv = O0; 2970 Label notFinal; 2971 2972 load_invoke_cp_cache_entry(byte_no, G5_method, noreg, Rret, true, false, false); 2973 __ mov(SP, O5_savedSP); // record SP that we wanted the callee to restore 2974 2975 // Check for vfinal 2976 __ set((1 << ConstantPoolCacheEntry::is_vfinal_shift), G4_scratch); 2977 __ btst(Rret, G4_scratch); 2978 __ br(Assembler::zero, false, Assembler::pt, notFinal); 2979 __ delayed()->and3(Rret, 0xFF, G4_scratch); // gets number of parameters 2980 2981 if (RewriteBytecodes && !UseSharedSpaces) { 2982 patch_bytecode(Bytecodes::_fast_invokevfinal, Rscratch, Rtemp); 2983 } 2984 2985 invokevfinal_helper(Rscratch, Rret); 2986 2987 __ bind(notFinal); 2988 2989 __ mov(G5_method, Rscratch); // better scratch register 2990 __ load_receiver(G4_scratch, O0_recv); // gets receiverOop 2991 // receiver is in O0_recv 2992 __ verify_oop(O0_recv); 2993 2994 // get return address 2995 AddressLiteral table(Interpreter::invoke_return_entry_table()); 2996 __ set(table, Rtemp); 2997 __ srl(Rret, ConstantPoolCacheEntry::tos_state_shift, Rret); // get return type 2998 // Make sure we don't need to mask Rret after the above shift 2999 ConstantPoolCacheEntry::verify_tos_state_shift(); 3000 __ sll(Rret, LogBytesPerWord, Rret); 3001 __ ld_ptr(Rtemp, Rret, Rret); // get return address 3002 3003 // get receiver klass 3004 __ null_check(O0_recv, oopDesc::klass_offset_in_bytes()); 3005 __ load_klass(O0_recv, O0_recv); 3006 __ verify_klass_ptr(O0_recv); 3007 3008 __ profile_virtual_call(O0_recv, O4); 3009 3010 generate_vtable_call(O0_recv, Rscratch, Rret); 3011 } 3012 3013 void TemplateTable::fast_invokevfinal(int byte_no) { 3014 transition(vtos, vtos); 3015 assert(byte_no == f2_byte, "use this argument"); 3016 3017 load_invoke_cp_cache_entry(byte_no, G5_method, noreg, Lscratch, true, 3018 /*is_invokevfinal*/true, false); 3019 __ mov(SP, O5_savedSP); // record SP that we wanted the 
callee to restore 3020 invokevfinal_helper(G3_scratch, Lscratch); 3021 } 3022 3023 void TemplateTable::invokevfinal_helper(Register Rscratch, Register Rret) { 3024 Register Rtemp = G4_scratch; 3025 3026 // Load receiver from stack slot 3027 __ ld_ptr(G5_method, in_bytes(Method::const_offset()), G4_scratch); 3028 __ lduh(G4_scratch, in_bytes(ConstMethod::size_of_parameters_offset()), G4_scratch); 3029 __ load_receiver(G4_scratch, O0); 3030 3031 // receiver NULL check 3032 __ null_check(O0); 3033 3034 __ profile_final_call(O4); 3035 __ profile_arguments_type(G5_method, Rscratch, Gargs, true); 3036 3037 // get return address 3038 AddressLiteral table(Interpreter::invoke_return_entry_table()); 3039 __ set(table, Rtemp); 3040 __ srl(Rret, ConstantPoolCacheEntry::tos_state_shift, Rret); // get return type 3041 // Make sure we don't need to mask Rret after the above shift 3042 ConstantPoolCacheEntry::verify_tos_state_shift(); 3043 __ sll(Rret, LogBytesPerWord, Rret); 3044 __ ld_ptr(Rtemp, Rret, Rret); // get return address 3045 3046 3047 // do the call 3048 __ call_from_interpreter(Rscratch, Gargs, Rret); 3049 } 3050 3051 3052 void TemplateTable::invokespecial(int byte_no) { 3053 transition(vtos, vtos); 3054 assert(byte_no == f1_byte, "use this argument"); 3055 3056 const Register Rret = Lscratch; 3057 const Register O0_recv = O0; 3058 const Register Rscratch = G3_scratch; 3059 3060 prepare_invoke(byte_no, G5_method, Rret, noreg, O0_recv); // get receiver also for null check 3061 __ null_check(O0_recv); 3062 3063 // do the call 3064 __ profile_call(O4); 3065 __ profile_arguments_type(G5_method, Rscratch, Gargs, false); 3066 __ call_from_interpreter(Rscratch, Gargs, Rret); 3067 } 3068 3069 3070 void TemplateTable::invokestatic(int byte_no) { 3071 transition(vtos, vtos); 3072 assert(byte_no == f1_byte, "use this argument"); 3073 3074 const Register Rret = Lscratch; 3075 const Register Rscratch = G3_scratch; 3076 3077 prepare_invoke(byte_no, G5_method, Rret); // get f1 Method* 3078 3079 // do the call 3080 __ profile_call(O4); 3081 __ profile_arguments_type(G5_method, Rscratch, Gargs, false); 3082 __ call_from_interpreter(Rscratch, Gargs, Rret); 3083 } 3084 3085 void TemplateTable::invokeinterface_object_method(Register RKlass, 3086 Register Rcall, 3087 Register Rret, 3088 Register Rflags) { 3089 Register Rscratch = G4_scratch; 3090 Register Rindex = Lscratch; 3091 3092 assert_different_registers(Rscratch, Rindex, Rret); 3093 3094 Label notFinal; 3095 3096 // Check for vfinal 3097 __ set((1 << ConstantPoolCacheEntry::is_vfinal_shift), Rscratch); 3098 __ btst(Rflags, Rscratch); 3099 __ br(Assembler::zero, false, Assembler::pt, notFinal); 3100 __ delayed()->nop(); 3101 3102 __ profile_final_call(O4); 3103 3104 // do the call - the index (f2) contains the Method* 3105 assert_different_registers(G5_method, Gargs, Rcall); 3106 __ mov(Rindex, G5_method); 3107 __ profile_arguments_type(G5_method, Rcall, Gargs, true); 3108 __ call_from_interpreter(Rcall, Gargs, Rret); 3109 __ bind(notFinal); 3110 3111 __ profile_virtual_call(RKlass, O4); 3112 generate_vtable_call(RKlass, Rindex, Rret); 3113 } 3114 3115 3116 void TemplateTable::invokeinterface(int byte_no) { 3117 transition(vtos, vtos); 3118 assert(byte_no == f1_byte, "use this argument"); 3119 3120 const Register Rinterface = G1_scratch; 3121 const Register Rret = G3_scratch; 3122 const Register Rindex = Lscratch; 3123 const Register O0_recv = O0; 3124 const Register O1_flags = O1; 3125 const Register O2_Klass = O2; 3126 const Register Rscratch = 
G4_scratch;
3127   assert_different_registers(Rscratch, G5_method);
3128
3129   prepare_invoke(byte_no, Rinterface, Rret, Rindex, O0_recv, O1_flags);
3130
3131   // get receiver klass
3132   __ null_check(O0_recv, oopDesc::klass_offset_in_bytes());
3133   __ load_klass(O0_recv, O2_Klass);
3134
3135   // Special case of invokeinterface called for virtual method of
3136   // java.lang.Object. See cpCache.cpp for details.
3137   // This code isn't produced by javac, but could be produced by
3138   // another compliant Java compiler.
3139   Label notMethod;
3140   __ set((1 << ConstantPoolCacheEntry::is_forced_virtual_shift), Rscratch);
3141   __ btst(O1_flags, Rscratch);
3142   __ br(Assembler::zero, false, Assembler::pt, notMethod);
3143   __ delayed()->nop();
3144
3145   invokeinterface_object_method(O2_Klass, Rinterface, Rret, O1_flags);
3146
3147   __ bind(notMethod);
3148
3149   __ profile_virtual_call(O2_Klass, O4);
3150
3151   //
3152   // find entry point to call
3153   //
3154
3155   // compute start of first itableOffsetEntry (which is at end of vtable)
3156   const int base = InstanceKlass::vtable_start_offset() * wordSize;
3157   Label search;
3158   Register Rtemp = O1_flags;
3159
3160   __ ld(O2_Klass, InstanceKlass::vtable_length_offset() * wordSize, Rtemp);
3161   if (align_object_offset(1) > 1) {
3162     __ round_to(Rtemp, align_object_offset(1));
3163   }
3164   __ sll(Rtemp, LogBytesPerWord, Rtemp); // Rtemp *= wordSize
3165   if (Assembler::is_simm13(base)) {
3166     __ add(Rtemp, base, Rtemp);
3167   } else {
3168     __ set(base, Rscratch);
3169     __ add(Rscratch, Rtemp, Rtemp);
3170   }
3171   __ add(O2_Klass, Rtemp, Rscratch);
3172
3173   __ bind(search);
3174
3175   __ ld_ptr(Rscratch, itableOffsetEntry::interface_offset_in_bytes(), Rtemp);
3176   {
3177     Label ok;
3178
3179     // Check that entry is non-null. Null entries are probably a bytecode
3180     // problem. If the interface isn't implemented by the receiver class,
3181     // the VM should throw IncompatibleClassChangeError. linkResolver checks
3182     // this too but that's only if the entry isn't already resolved, so we
3183     // need to check again.
3184     __ br_notnull_short( Rtemp, Assembler::pt, ok);
3185     call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_IncompatibleClassChangeError));
3186     __ should_not_reach_here();
3187     __ bind(ok);
3188   }
3189
3190   __ cmp(Rinterface, Rtemp);
3191   __ brx(Assembler::notEqual, true, Assembler::pn, search);
3192   __ delayed()->add(Rscratch, itableOffsetEntry::size() * wordSize, Rscratch);
3193
3194   // entry found and Rscratch points to it
3195   __ ld(Rscratch, itableOffsetEntry::offset_offset_in_bytes(), Rscratch);
3196
3197   assert(itableMethodEntry::method_offset_in_bytes() == 0, "adjust instruction below");
3198   __ sll(Rindex, exact_log2(itableMethodEntry::size() * wordSize), Rindex); // Rindex *= sizeof(itableMethodEntry)
3199   __ add(Rscratch, Rindex, Rscratch);
3200   __ ld_ptr(O2_Klass, Rscratch, G5_method);
3201
3202   // Check for abstract method error.
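  // (A null Method* in the itable slot means the receiver class does not
  //  provide an implementation, so the VM throws AbstractMethodError.)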
3203   {
3204     Label ok;
3205     __ br_notnull_short(G5_method, Assembler::pt, ok);
3206     call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_AbstractMethodError));
3207     __ should_not_reach_here();
3208     __ bind(ok);
3209   }
3210
3211   Register Rcall = Rinterface;
3212   assert_different_registers(Rcall, G5_method, Gargs, Rret);
3213
3214   __ profile_arguments_type(G5_method, Rcall, Gargs, true);
3215   __ profile_called_method(G5_method, Rscratch);
3216   __ call_from_interpreter(Rcall, Gargs, Rret);
3217 }
3218
3219 void TemplateTable::invokehandle(int byte_no) {
3220   transition(vtos, vtos);
3221   assert(byte_no == f1_byte, "use this argument");
3222
3223   const Register Rret = Lscratch;
3224   const Register G4_mtype = G4_scratch;
3225   const Register O0_recv = O0;
3226   const Register Rscratch = G3_scratch;
3227
3228   prepare_invoke(byte_no, G5_method, Rret, G4_mtype, O0_recv);
3229   __ null_check(O0_recv);
3230
3231   // G4: MethodType object (from cpool->resolved_references[f1], if necessary)
3232   // G5: MH.invokeExact_MT method (from f2)
3233
3234   // Note: G4_mtype is already pushed (if necessary) by prepare_invoke
3235
3236   // do the call
3237   __ verify_oop(G4_mtype);
3238   __ profile_final_call(O4); // FIXME: profile the LambdaForm also
3239   __ profile_arguments_type(G5_method, Rscratch, Gargs, true);
3240   __ call_from_interpreter(Rscratch, Gargs, Rret);
3241 }
3242
3243
3244 void TemplateTable::invokedynamic(int byte_no) {
3245   transition(vtos, vtos);
3246   assert(byte_no == f1_byte, "use this argument");
3247
3248   const Register Rret = Lscratch;
3249   const Register G4_callsite = G4_scratch;
3250   const Register Rscratch = G3_scratch;
3251
3252   prepare_invoke(byte_no, G5_method, Rret, G4_callsite);
3253
3254   // G4: CallSite object (from cpool->resolved_references[f1])
3255   // G5: MH.linkToCallSite method (from f2)
3256
3257   // Note: G4_callsite is already pushed by prepare_invoke
3258
3259   // %%% should make a type profile for any invokedynamic that takes a ref argument
3260   // profile this call
3261   __ profile_call(O4);
3262
3263   // do the call
3264   __ verify_oop(G4_callsite);
3265   __ profile_arguments_type(G5_method, Rscratch, Gargs, false);
3266   __ call_from_interpreter(Rscratch, Gargs, Rret);
3267 }
3268
3269
3270 //----------------------------------------------------------------------------------------------------
3271 // Allocation
3272
3273 void TemplateTable::_new() {
3274   transition(vtos, atos);
3275
3276   Label slow_case;
3277   Label done;
3278   Label initialize_header;
3279   Label initialize_object; // including clearing the fields
3280
3281   Register RallocatedObject = Otos_i;
3282   Register RinstanceKlass = O1;
3283   Register Roffset = O3;
3284   Register Rscratch = O4;
3285
3286   __ get_2_byte_integer_at_bcp(1, Rscratch, Roffset, InterpreterMacroAssembler::Unsigned);
3287   __ get_cpool_and_tags(Rscratch, G3_scratch);
3288   // make sure the class we're about to instantiate has been resolved
3289   // This is done before loading InstanceKlass to be consistent with the order
3290   // in which the constant pool is updated (see ConstantPool::klass_at_put)
3291   __ add(G3_scratch, Array<u1>::base_offset_in_bytes(), G3_scratch);
3292   __ ldub(G3_scratch, Roffset, G3_scratch);
3293   __ cmp(G3_scratch, JVM_CONSTANT_Class);
3294   __ br(Assembler::notEqual, false, Assembler::pn, slow_case);
3295   __ delayed()->sll(Roffset, LogBytesPerWord, Roffset);
3296   // get InstanceKlass
3297   //__ sll(Roffset, LogBytesPerWord, Roffset); // executed in delay slot
3298   __ add(Roffset, sizeof(ConstantPool), Roffset);
3299   __ ld_ptr(Rscratch, Roffset, RinstanceKlass);
3300
3301   // make sure klass is fully initialized:
3302   __ ldub(RinstanceKlass, in_bytes(InstanceKlass::init_state_offset()), G3_scratch);
3303   __ cmp(G3_scratch, InstanceKlass::fully_initialized);
3304   __ br(Assembler::notEqual, false, Assembler::pn, slow_case);
3305   __ delayed()->ld(RinstanceKlass, in_bytes(Klass::layout_helper_offset()), Roffset);
3306
3307   // get instance_size in InstanceKlass (already aligned)
3308   //__ ld(RinstanceKlass, in_bytes(Klass::layout_helper_offset()), Roffset);
3309
3310   // make sure klass has no finalizer and is not abstract, an interface, or java/lang/Class
3311   __ btst(Klass::_lh_instance_slow_path_bit, Roffset);
3312   __ br(Assembler::notZero, false, Assembler::pn, slow_case);
3313   __ delayed()->nop();
3314
3315   // allocate the instance
3316   // 1) Try to allocate in the TLAB
3317   // 2) if that fails, and the TLAB is not full enough to discard, allocate in the shared eden
3318   // 3) if the above fails (or is not applicable), go to a slow case
3319   // (creates a new TLAB, etc.)
3320
3321   const bool allow_shared_alloc =
3322     Universe::heap()->supports_inline_contig_alloc();
3323
3324   if (UseTLAB) {
3325     Register RoldTopValue = RallocatedObject;
3326     Register RtlabWasteLimitValue = G3_scratch;
3327     Register RnewTopValue = G1_scratch;
3328     Register RendValue = Rscratch;
3329     Register RfreeValue = RnewTopValue;
3330
3331     // check if we can allocate in the TLAB
3332     __ ld_ptr(G2_thread, in_bytes(JavaThread::tlab_top_offset()), RoldTopValue); // sets up RallocatedObject
3333     __ ld_ptr(G2_thread, in_bytes(JavaThread::tlab_end_offset()), RendValue);
3334     __ add(RoldTopValue, Roffset, RnewTopValue);
3335
3336     // if there is enough space, we do not CAS and do not clear
3337     __ cmp(RnewTopValue, RendValue);
3338     if (ZeroTLAB) {
3339       // the fields have already been cleared
3340       __ brx(Assembler::lessEqualUnsigned, true, Assembler::pt, initialize_header);
3341     } else {
3342       // initialize both the header and fields
3343       __ brx(Assembler::lessEqualUnsigned, true, Assembler::pt, initialize_object);
3344     }
3345     __ delayed()->st_ptr(RnewTopValue, G2_thread, in_bytes(JavaThread::tlab_top_offset()));
3346
3347     if (allow_shared_alloc) {
3348       // Check if tlab should be discarded (refill_waste_limit >= free)
3349       __ ld_ptr(G2_thread, in_bytes(JavaThread::tlab_refill_waste_limit_offset()), RtlabWasteLimitValue);
3350       __ sub(RendValue, RoldTopValue, RfreeValue);
3351 #ifdef _LP64
3352       __ srlx(RfreeValue, LogHeapWordSize, RfreeValue);
3353 #else
3354       __ srl(RfreeValue, LogHeapWordSize, RfreeValue);
3355 #endif
3356       __ cmp_and_brx_short(RtlabWasteLimitValue, RfreeValue, Assembler::greaterEqualUnsigned, Assembler::pt, slow_case); // tlab waste is small
3357
3358       // increment waste limit to prevent getting stuck on this slow path
3359       __ add(RtlabWasteLimitValue, ThreadLocalAllocBuffer::refill_waste_limit_increment(), RtlabWasteLimitValue);
3360       __ st_ptr(RtlabWasteLimitValue, G2_thread, in_bytes(JavaThread::tlab_refill_waste_limit_offset()));
3361     } else {
3362       // No allocation in the shared eden.
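      // (With a TLAB but no inline contiguous allocation, a TLAB miss goes
      //  straight to the slow path, which can refill the TLAB; see the note
      //  above about the slow case.)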
3363 __ ba_short(slow_case); 3364 } 3365 } 3366 3367 // Allocation in the shared Eden 3368 if (allow_shared_alloc) { 3369 Register RoldTopValue = G1_scratch; 3370 Register RtopAddr = G3_scratch; 3371 Register RnewTopValue = RallocatedObject; 3372 Register RendValue = Rscratch; 3373 3374 __ set((intptr_t)Universe::heap()->top_addr(), RtopAddr); 3375 3376 Label retry; 3377 __ bind(retry); 3378 __ set((intptr_t)Universe::heap()->end_addr(), RendValue); 3379 __ ld_ptr(RendValue, 0, RendValue); 3380 __ ld_ptr(RtopAddr, 0, RoldTopValue); 3381 __ add(RoldTopValue, Roffset, RnewTopValue); 3382 3383 // RnewTopValue contains the top address after the new object 3384 // has been allocated. 3385 __ cmp_and_brx_short(RnewTopValue, RendValue, Assembler::greaterUnsigned, Assembler::pn, slow_case); 3386 3387 __ cas_ptr(RtopAddr, RoldTopValue, RnewTopValue); 3388 3389 // if someone beat us on the allocation, try again, otherwise continue 3390 __ cmp_and_brx_short(RoldTopValue, RnewTopValue, Assembler::notEqual, Assembler::pn, retry); 3391 3392 // bump total bytes allocated by this thread 3393 // RoldTopValue and RtopAddr are dead, so can use G1 and G3 3394 __ incr_allocated_bytes(Roffset, G1_scratch, G3_scratch); 3395 } 3396 3397 if (UseTLAB || Universe::heap()->supports_inline_contig_alloc()) { 3398 // clear object fields 3399 __ bind(initialize_object); 3400 __ deccc(Roffset, sizeof(oopDesc)); 3401 __ br(Assembler::zero, false, Assembler::pt, initialize_header); 3402 __ delayed()->add(RallocatedObject, sizeof(oopDesc), G3_scratch); 3403 3404 // initialize remaining object fields 3405 if (UseBlockZeroing) { 3406 // Use BIS for zeroing 3407 __ bis_zeroing(G3_scratch, Roffset, G1_scratch, initialize_header); 3408 } else { 3409 Label loop; 3410 __ subcc(Roffset, wordSize, Roffset); 3411 __ bind(loop); 3412 //__ subcc(Roffset, wordSize, Roffset); // executed above loop or in delay slot 3413 __ st_ptr(G0, G3_scratch, Roffset); 3414 __ br(Assembler::notEqual, false, Assembler::pt, loop); 3415 __ delayed()->subcc(Roffset, wordSize, Roffset); 3416 } 3417 __ ba_short(initialize_header); 3418 } 3419 3420 // slow case 3421 __ bind(slow_case); 3422 __ get_2_byte_integer_at_bcp(1, G3_scratch, O2, InterpreterMacroAssembler::Unsigned); 3423 __ get_constant_pool(O1); 3424 3425 call_VM(Otos_i, CAST_FROM_FN_PTR(address, InterpreterRuntime::_new), O1, O2); 3426 3427 __ ba_short(done); 3428 3429 // Initialize the header: mark, klass 3430 __ bind(initialize_header); 3431 3432 if (UseBiasedLocking) { 3433 __ ld_ptr(RinstanceKlass, in_bytes(Klass::prototype_header_offset()), G4_scratch); 3434 } else { 3435 __ set((intptr_t)markOopDesc::prototype(), G4_scratch); 3436 } 3437 __ st_ptr(G4_scratch, RallocatedObject, oopDesc::mark_offset_in_bytes()); // mark 3438 __ store_klass_gap(G0, RallocatedObject); // klass gap if compressed 3439 __ store_klass(RinstanceKlass, RallocatedObject); // klass (last for cms) 3440 3441 { 3442 SkipIfEqual skip_if( 3443 _masm, G4_scratch, &DTraceAllocProbes, Assembler::zero); 3444 // Trigger dtrace event 3445 __ push(atos); 3446 __ call_VM_leaf(noreg, 3447 CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_object_alloc), O0); 3448 __ pop(atos); 3449 } 3450 3451 // continue 3452 __ bind(done); 3453 } 3454 3455 3456 3457 void TemplateTable::newarray() { 3458 transition(itos, atos); 3459 __ ldub(Lbcp, 1, O1); 3460 call_VM(Otos_i, CAST_FROM_FN_PTR(address, InterpreterRuntime::newarray), O1, Otos_i); 3461 } 3462 3463 3464 void TemplateTable::anewarray() { 3465 transition(itos, atos); 3466 __ 

  if (UseTLAB || Universe::heap()->supports_inline_contig_alloc()) {
    // clear object fields
    __ bind(initialize_object);
    __ deccc(Roffset, sizeof(oopDesc));
    __ br(Assembler::zero, false, Assembler::pt, initialize_header);
    __ delayed()->add(RallocatedObject, sizeof(oopDesc), G3_scratch);

    // initialize remaining object fields
    if (UseBlockZeroing) {
      // Use BIS for zeroing
      __ bis_zeroing(G3_scratch, Roffset, G1_scratch, initialize_header);
    } else {
      Label loop;
      __ subcc(Roffset, wordSize, Roffset);
      __ bind(loop);
      //__ subcc(Roffset, wordSize, Roffset); // executed above the loop or in the delay slot
      __ st_ptr(G0, G3_scratch, Roffset);
      __ br(Assembler::notEqual, false, Assembler::pt, loop);
      __ delayed()->subcc(Roffset, wordSize, Roffset);
    }
    __ ba_short(initialize_header);
  }

  // slow case
  __ bind(slow_case);
  __ get_2_byte_integer_at_bcp(1, G3_scratch, O2, InterpreterMacroAssembler::Unsigned);
  __ get_constant_pool(O1);

  call_VM(Otos_i, CAST_FROM_FN_PTR(address, InterpreterRuntime::_new), O1, O2);

  __ ba_short(done);

  // Initialize the header: mark word, then klass
  __ bind(initialize_header);

  if (UseBiasedLocking) {
    __ ld_ptr(RinstanceKlass, in_bytes(Klass::prototype_header_offset()), G4_scratch);
  } else {
    __ set((intptr_t)markOopDesc::prototype(), G4_scratch);
  }
  __ st_ptr(G4_scratch, RallocatedObject, oopDesc::mark_offset_in_bytes()); // mark
  __ store_klass_gap(G0, RallocatedObject);       // klass gap if compressed
  __ store_klass(RinstanceKlass, RallocatedObject); // klass (last for cms)

  {
    SkipIfEqual skip_if(
      _masm, G4_scratch, &DTraceAllocProbes, Assembler::zero);
    // Trigger dtrace event
    __ push(atos);
    __ call_VM_leaf(noreg,
       CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_object_alloc), O0);
    __ pop(atos);
  }

  // continue
  __ bind(done);
}


void TemplateTable::newarray() {
  transition(itos, atos);
  __ ldub(Lbcp, 1, O1); // the atype operand (element type) at bcp + 1
  call_VM(Otos_i, CAST_FROM_FN_PTR(address, InterpreterRuntime::newarray), O1, Otos_i);
}


void TemplateTable::anewarray() {
  transition(itos, atos);
  __ get_constant_pool(O1);
  __ get_2_byte_integer_at_bcp(1, G4_scratch, O2, InterpreterMacroAssembler::Unsigned);
  call_VM(Otos_i, CAST_FROM_FN_PTR(address, InterpreterRuntime::anewarray), O1, O2, Otos_i);
}


void TemplateTable::arraylength() {
  transition(atos, itos);
  Label ok;
  __ verify_oop(Otos_i);
  __ tst(Otos_i);
  __ throw_if_not_1_x( Assembler::notZero, ok );
  __ delayed()->ld(Otos_i, arrayOopDesc::length_offset_in_bytes(), Otos_i);
  __ throw_if_not_2( Interpreter::_throw_NullPointerException_entry, G3_scratch, ok);
}

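
// checkcast and instanceof below both start by testing the constant pool
// tag at the bytecode's CP index: once the class has been resolved, the
// tag is JVM_CONSTANT_Class and the Klass* can be loaded straight out of
// the resolved constant pool entry (the "quickened" path); otherwise they
// call InterpreterRuntime::quicken_io_cc to resolve it first. In effect
// (a sketch for illustration, not the literal accessors used below):
//
//   if (tags[index] == JVM_CONSTANT_Class) {
//     klass = cpool_entry(index); // quickened: resolved Klass* in the pool
//   } else {
//     klass = quicken_io_cc();    // resolve via the runtime, then continue
//   }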
void TemplateTable::checkcast() {
  transition(atos, atos);
  Label done, is_null, quicked, cast_ok, resolved;
  Register Roffset = G1_scratch;
  Register RobjKlass = O5;
  Register RspecifiedKlass = O4;

  // Check for casting a NULL
  __ br_null(Otos_i, false, Assembler::pn, is_null);
  __ delayed()->nop();

  // Get value klass in RobjKlass
  __ load_klass(Otos_i, RobjKlass); // get value klass

  // Get constant pool tag
  __ get_2_byte_integer_at_bcp(1, Lscratch, Roffset, InterpreterMacroAssembler::Unsigned);

  // See if the checkcast has been quickened
  __ get_cpool_and_tags(Lscratch, G3_scratch);
  __ add(G3_scratch, Array<u1>::base_offset_in_bytes(), G3_scratch);
  __ ldub(G3_scratch, Roffset, G3_scratch);
  __ cmp(G3_scratch, JVM_CONSTANT_Class);
  __ br(Assembler::equal, true, Assembler::pt, quicked);
  __ delayed()->sll(Roffset, LogBytesPerWord, Roffset);

  __ push_ptr(); // save receiver for result, and for GC
  call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::quicken_io_cc) );
  __ get_vm_result_2(RspecifiedKlass);
  __ pop_ptr(Otos_i, G3_scratch); // restore receiver

  __ ba_short(resolved);

  // Extract target class from constant pool
  __ bind(quicked);
  __ add(Roffset, sizeof(ConstantPool), Roffset);
  __ ld_ptr(Lscratch, Roffset, RspecifiedKlass);
  __ bind(resolved);
  __ load_klass(Otos_i, RobjKlass); // get value klass

  // Generate a fast subtype check. Branch to cast_ok on success;
  // throw an exception on failure.
  __ gen_subtype_check( RobjKlass, RspecifiedKlass, G3_scratch, G4_scratch, G1_scratch, cast_ok );

  // Not a subtype, so we must throw a ClassCastException
  __ throw_if_not_x( Assembler::never, Interpreter::_throw_ClassCastException_entry, G3_scratch );

  __ bind(cast_ok);

  if (ProfileInterpreter) {
    __ ba_short(done);
  }
  __ bind(is_null);
  __ profile_null_seen(G3_scratch);
  __ bind(done);
}


void TemplateTable::instanceof() {
  Label done, is_null, quicked, resolved;
  transition(atos, itos);
  Register Roffset = G1_scratch;
  Register RobjKlass = O5;
  Register RspecifiedKlass = O4;

  // Check for a NULL object (instanceof of NULL is false)
  __ br_null(Otos_i, false, Assembler::pt, is_null);
  __ delayed()->nop();

  // Get value klass in RobjKlass
  __ load_klass(Otos_i, RobjKlass); // get value klass

  // Get constant pool tag
  __ get_2_byte_integer_at_bcp(1, Lscratch, Roffset, InterpreterMacroAssembler::Unsigned);

  // See if the instanceof has been quickened
  __ get_cpool_and_tags(Lscratch, G3_scratch);
  __ add(G3_scratch, Array<u1>::base_offset_in_bytes(), G3_scratch);
  __ ldub(G3_scratch, Roffset, G3_scratch);
  __ cmp(G3_scratch, JVM_CONSTANT_Class);
  __ br(Assembler::equal, true, Assembler::pt, quicked);
  __ delayed()->sll(Roffset, LogBytesPerWord, Roffset);

  __ push_ptr(); // save receiver for result, and for GC
  call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::quicken_io_cc) );
  __ get_vm_result_2(RspecifiedKlass);
  __ pop_ptr(Otos_i, G3_scratch); // restore receiver

  __ ba_short(resolved);

  // Extract target class from constant pool
  __ bind(quicked);
  __ add(Roffset, sizeof(ConstantPool), Roffset);
  __ get_constant_pool(Lscratch);
  __ ld_ptr(Lscratch, Roffset, RspecifiedKlass);
  __ bind(resolved);
  __ load_klass(Otos_i, RobjKlass); // get value klass

  // Generate a fast subtype check. Branch to done on success (the result
  // has already been set to 1); otherwise fall through and return 0.
  __ or3(G0, 1, Otos_i); // set result assuming the quick tests succeed
  __ gen_subtype_check( RobjKlass, RspecifiedKlass, G3_scratch, G4_scratch, G1_scratch, done );
  // Not a subtype; return 0
  __ clr( Otos_i );

  if (ProfileInterpreter) {
    __ ba_short(done);
  }
  __ bind(is_null);
  __ profile_null_seen(G3_scratch);
  __ bind(done);
}
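
// Both checkcast and instanceof funnel into MacroAssembler::gen_subtype_check
// for the actual "is RobjKlass a subtype of RspecifiedKlass?" test. A
// simplified sketch of that test (illustration only -- the generated code
// works with the super-check offset and a secondary-supers cache rather
// than this plain form, and the helper names here are made up):
//
//   bool is_subtype(Klass* sub, Klass* super) {
//     if (sub == super)                 return true; // trivial hit
//     if (sub->primary_super_of(super)) return true; // fixed-depth display
//     return sub->search_secondary_supers(super);    // linear scan
//   }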

void TemplateTable::_breakpoint() {

  // Note: We get here even if we are single stepping.
  // jbug insists on setting breakpoints at every bytecode
  // even if we are in single-step mode.

  transition(vtos, vtos);
  // get the unpatched byte code
  __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::get_original_bytecode_at), Lmethod, Lbcp);
  __ mov(O0, Lbyte_code);

  // post the breakpoint event
  __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::_breakpoint), Lmethod, Lbcp);

  // complete the execution of the original bytecode
  __ dispatch_normal(vtos);
}


//----------------------------------------------------------------------------------------------------
// Exceptions

void TemplateTable::athrow() {
  transition(atos, vtos);

  // This works because the exception is cached in Otos_i, which is the same
  // register as O0, which in turn is the register throw_exception_entry expects.
  assert(Otos_i == Oexception, "see explanation above");

  __ verify_oop(Otos_i);
  __ null_check(Otos_i);
  __ throw_if_not_x(Assembler::never, Interpreter::throw_exception_entry(), G3_scratch);
}


//----------------------------------------------------------------------------------------------------
// Synchronization


// See frame_sparc.hpp for the monitor block layout.
// Monitor elements are dynamically allocated by growing the stack as needed.

void TemplateTable::monitorenter() {
  transition(atos, vtos);
  __ verify_oop(Otos_i);
  // Try to acquire a lock on the object; repeat until it succeeds
  // (i.e., until monitorenter returns true).

  { Label ok;
    __ tst(Otos_i);
    __ throw_if_not_1_x( Assembler::notZero, ok);
    __ delayed()->mov(Otos_i, Lscratch); // save obj
    __ throw_if_not_2( Interpreter::_throw_NullPointerException_entry, G3_scratch, ok);
  }

  assert(O0 == Otos_i, "Be sure where the object to lock is");

  // find a free slot in the monitor block

  // initialize entry pointer
  __ clr(O1); // points to free slot or NULL

  {
    Label entry, loop, exit;
    __ add( __ top_most_monitor(), O2 ); // last one to check
    __ ba( entry );
    __ delayed()->mov( Lmonitors, O3 );  // first one to check

    __ bind( loop );

    __ verify_oop(O4); // verify each monitor's oop
    __ tst(O4);        // is this entry unused?
    __ movcc( Assembler::zero, false, Assembler::ptr_cc, O3, O1);

    __ cmp(O4, O0);    // check if current entry is for the same object
    __ brx( Assembler::equal, false, Assembler::pn, exit );
    __ delayed()->inc( O3, frame::interpreter_frame_monitor_size() * wordSize ); // check next one

    __ bind( entry );

    __ cmp( O3, O2 );
    __ brx( Assembler::lessEqualUnsigned, true, Assembler::pt, loop );
    __ delayed()->ld_ptr(O3, BasicObjectLock::obj_offset_in_bytes(), O4);

    __ bind( exit );
  }
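
  // The scan above walks the in-frame monitor block looking for a slot to
  // use. In effect (a C-like sketch for illustration; the monitor block
  // lives between Lmonitors and top_most_monitor() in the current frame):
  //
  //   BasicObjectLock* free = NULL;
  //   for (BasicObjectLock* m = monitors; m <= top_most_monitor; m++) {
  //     if (m->obj() == NULL) free = m; // remember a free slot
  //     if (m->obj() == obj)  break;    // stop at an entry for this object
  //   }
  //   if (free == NULL) free = add_monitor_to_stack(); // grow the block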

  { Label allocated;

    // found a free slot?
    __ br_notnull_short(O1, Assembler::pn, allocated);

    __ add_monitor_to_stack( false, O2, O3 );
    __ mov(Lmonitors, O1);

    __ bind(allocated);
  }

  // Increment bcp to point to the next bytecode, so exception handling for
  // async exceptions works correctly. The object has already been popped
  // from the stack, so the expression stack looks correct.
  __ inc(Lbcp);

  __ st_ptr(O0, O1, BasicObjectLock::obj_offset_in_bytes()); // store object
  __ lock_object(O1, O0);

  // check if there's enough space on the stack for the monitors after locking
  __ generate_stack_overflow_check(0);

  // The bcp has already been incremented. Just need to dispatch to the next instruction.
  __ dispatch_next(vtos);
}


void TemplateTable::monitorexit() {
  transition(atos, vtos);
  __ verify_oop(Otos_i);
  __ tst(Otos_i);
  __ throw_if_not_x( Assembler::notZero, Interpreter::_throw_NullPointerException_entry, G3_scratch );

  assert(O0 == Otos_i, "just checking");

  { Label entry, loop, found;
    __ add( __ top_most_monitor(), O2 ); // last one to check
    __ ba(entry);
    // Use Lscratch to hold the monitor element to check, starting with the
    // most recent monitor; being a local register, it survives the call
    // into the C routine.
    __ delayed()->mov( Lmonitors, Lscratch );

    __ bind( loop );

    __ verify_oop(O4); // verify each monitor's oop
    __ cmp(O4, O0);    // check if current entry is for the desired object
    __ brx( Assembler::equal, true, Assembler::pt, found );
    __ delayed()->mov(Lscratch, O1); // pass the found entry as an argument to monitorexit

    __ inc( Lscratch, frame::interpreter_frame_monitor_size() * wordSize ); // advance to next

    __ bind( entry );

    __ cmp( Lscratch, O2 );
    __ brx( Assembler::lessEqualUnsigned, true, Assembler::pt, loop );
    __ delayed()->ld_ptr(Lscratch, BasicObjectLock::obj_offset_in_bytes(), O4);

    // fell off the end of the monitor block: no entry for this object
    call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_illegal_monitor_state_exception));
    __ should_not_reach_here();

    __ bind(found);
  }
  __ unlock_object(O1);
}


//----------------------------------------------------------------------------------------------------
// Wide instructions

void TemplateTable::wide() {
  transition(vtos, vtos);
  __ ldub(Lbcp, 1, G3_scratch);                    // get the next bytecode
  __ sll(G3_scratch, LogBytesPerWord, G3_scratch); // scale it to a word offset
  AddressLiteral ep(Interpreter::_wentry_point);
  __ set(ep, G4_scratch);
  __ ld_ptr(G4_scratch, G3_scratch, G3_scratch);
  __ jmp(G3_scratch, G0);
  __ delayed()->nop();
  // Note: the Lbcp increment step is part of the individual wide bytecode implementations
}


//----------------------------------------------------------------------------------------------------
// Multi arrays

void TemplateTable::multianewarray() {
  transition(vtos, atos);
  // put ndims * wordSize into Lscratch
  __ ldub( Lbcp, 3, Lscratch);
  __ sll( Lscratch, Interpreter::logStackElementSize, Lscratch);
  // Lesp points past the last_dim, so set O1 to the first_dim address
  __ add( Lesp, Lscratch, O1);
  call_VM(Otos_i, CAST_FROM_FN_PTR(address, InterpreterRuntime::multianewarray), O1);
  __ add( Lesp, Lscratch, Lesp); // pop all dimensions off the stack
}
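
// For reference, the operand/stack layout multianewarray relies on above
// (a sketch; it follows from the bytecode format and the code's own
// comments, not from anything new):
//
//   bcp[1..2] : constant pool index of the array class
//   bcp[3]    : ndims, the number of dimensions
//
//   Lesp points just past the last (innermost) dimension word, so
//     first_dim_addr = Lesp + ndims * stack_element_size   (passed in O1)
//   and popping all dimension words afterwards is simply
//     Lesp += ndims * stack_element_size.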