/*
 * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/interpreterRuntime.hpp"
#include "interpreter/interp_masm.hpp"
#include "interpreter/templateTable.hpp"
#include "memory/universe.inline.hpp"
#include "oops/methodData.hpp"
#include "oops/objArrayKlass.hpp"
#include "oops/oop.inline.hpp"
#include "prims/methodHandles.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/synchronizer.hpp"
#include "utilities/macros.hpp"

#ifndef CC_INTERP
#define __ _masm->

// Misc helpers

// Do an oop store like *(base + index + offset) = val
// index can be noreg, in which case offset is used instead
static void do_oop_store(InterpreterMacroAssembler* _masm,
                         Register base,
                         Register index,
                         int offset,
                         Register val,
                         Register tmp,
                         BarrierSet::Name barrier,
                         bool precise) {
  assert(tmp != val && tmp != base && tmp != index, "register collision");
  assert(index == noreg || offset == 0, "only one offset");
  switch (barrier) {
#if INCLUDE_ALL_GCS
    case BarrierSet::G1SATBCTLogging:
      {
        // Load and record the previous value.
        __ g1_write_barrier_pre(base, index, offset,
                                noreg /* pre_val */,
                                tmp, true /*preserve_o_regs*/);

        // G1 barrier needs uncompressed oop for region cross check.
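        // (Hedged note on the copy below, from a reading of the surrounding
        // code: with UseCompressedOops the store_heap_oop call may compress
        // the value register in place, while g1_write_barrier_post filters
        // same-region stores by comparing full uncompressed addresses, so an
        // uncompressed copy of the new value is preserved in tmp first.)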
        Register new_val = val;
        if (UseCompressedOops && val != G0) {
          new_val = tmp;
          __ mov(val, new_val);
        }

        if (index == noreg) {
          assert(Assembler::is_simm13(offset), "fix this code");
          __ store_heap_oop(val, base, offset);
        } else {
          __ store_heap_oop(val, base, index);
        }

        // No need for post barrier if storing NULL
        if (val != G0) {
          if (precise) {
            if (index == noreg) {
              __ add(base, offset, base);
            } else {
              __ add(base, index, base);
            }
          }
          __ g1_write_barrier_post(base, new_val, tmp);
        }
      }
      break;
#endif // INCLUDE_ALL_GCS
    case BarrierSet::CardTableModRef:
    case BarrierSet::CardTableExtension:
      {
        if (index == noreg) {
          assert(Assembler::is_simm13(offset), "fix this code");
          __ store_heap_oop(val, base, offset);
        } else {
          __ store_heap_oop(val, base, index);
        }
        // No need for post barrier if storing NULL
        if (val != G0) {
          if (precise) {
            if (index == noreg) {
              __ add(base, offset, base);
            } else {
              __ add(base, index, base);
            }
          }
          __ card_write_barrier_post(base, val, tmp);
        }
      }
      break;
    case BarrierSet::ModRef:
      ShouldNotReachHere();
      break;
    default:
      ShouldNotReachHere();
  }
}


//----------------------------------------------------------------------------------------------------
// Platform-dependent initialization

void TemplateTable::pd_initialize() {
  // (none)
}


//----------------------------------------------------------------------------------------------------
// Condition conversion
Assembler::Condition ccNot(TemplateTable::Condition cc) {
  switch (cc) {
    case TemplateTable::equal        : return Assembler::notEqual;
    case TemplateTable::not_equal    : return Assembler::equal;
    case TemplateTable::less         : return Assembler::greaterEqual;
    case TemplateTable::less_equal   : return Assembler::greater;
    case TemplateTable::greater      : return Assembler::lessEqual;
    case TemplateTable::greater_equal: return Assembler::less;
  }
  ShouldNotReachHere();
  return Assembler::zero;
}

//----------------------------------------------------------------------------------------------------
// Miscellaneous helper routines


Address TemplateTable::at_bcp(int offset) {
  assert(_desc->uses_bcp(), "inconsistent uses_bcp information");
  return Address(Lbcp, offset);
}


void TemplateTable::patch_bytecode(Bytecodes::Code bc, Register bc_reg,
                                   Register temp_reg, bool load_bc_into_bc_reg/*=true*/,
                                   int byte_no) {
  // With sharing on, may need to test Method* flag.
  if (!RewriteBytecodes) return;
  Label L_patch_done;

  switch (bc) {
    case Bytecodes::_fast_aputfield:
    case Bytecodes::_fast_bputfield:
    case Bytecodes::_fast_cputfield:
    case Bytecodes::_fast_dputfield:
    case Bytecodes::_fast_fputfield:
    case Bytecodes::_fast_iputfield:
    case Bytecodes::_fast_lputfield:
    case Bytecodes::_fast_sputfield:
      {
        // We skip bytecode quickening for putfield instructions when
        // the put_code written to the constant pool cache is zero.
        // This is required so that every execution of this instruction
        // calls out to InterpreterRuntime::resolve_get_put to do
        // additional, required work.
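        // (Illustrative reading, not from this file: while the field
        // reference is still unresolved, the cached put_code is zero, so the
        // compare-and-branch below skips the patch and each execution
        // re-enters the runtime; only once resolution has filled in the
        // cache entry does the rewrite to the _fast_xputfield form happen.)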
        assert(byte_no == f1_byte || byte_no == f2_byte, "byte_no out of range");
        assert(load_bc_into_bc_reg, "we use bc_reg as temp");
        __ get_cache_and_index_and_bytecode_at_bcp(bc_reg, temp_reg, temp_reg, byte_no, 1);
        __ set(bc, bc_reg);
        __ cmp_and_br_short(temp_reg, 0, Assembler::equal, Assembler::pn, L_patch_done);  // don't patch
      }
      break;
    default:
      assert(byte_no == -1, "sanity");
      if (load_bc_into_bc_reg) {
        __ set(bc, bc_reg);
      }
  }

  if (JvmtiExport::can_post_breakpoint()) {
    Label L_fast_patch;
    __ ldub(at_bcp(0), temp_reg);
    __ cmp_and_br_short(temp_reg, Bytecodes::_breakpoint, Assembler::notEqual, Assembler::pt, L_fast_patch);
    // perform the quickening, slowly, in the bowels of the breakpoint table
    __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::set_original_bytecode_at), Lmethod, Lbcp, bc_reg);
    __ ba_short(L_patch_done);
    __ bind(L_fast_patch);
  }

#ifdef ASSERT
  Bytecodes::Code orig_bytecode = Bytecodes::java_code(bc);
  Label L_okay;
  __ ldub(at_bcp(0), temp_reg);
  __ cmp(temp_reg, orig_bytecode);
  __ br(Assembler::equal, false, Assembler::pt, L_okay);
  __ delayed()->cmp(temp_reg, bc_reg);
  __ br(Assembler::equal, false, Assembler::pt, L_okay);
  __ delayed()->nop();
  __ stop("patching the wrong bytecode");
  __ bind(L_okay);
#endif

  // patch bytecode
  __ stb(bc_reg, at_bcp(0));
  __ bind(L_patch_done);
}

//----------------------------------------------------------------------------------------------------
// Individual instructions

void TemplateTable::nop() {
  transition(vtos, vtos);
  // nothing to do
}

void TemplateTable::shouldnotreachhere() {
  transition(vtos, vtos);
  __ stop("shouldnotreachhere bytecode");
}

void TemplateTable::aconst_null() {
  transition(vtos, atos);
  __ clr(Otos_i);
}


void TemplateTable::iconst(int value) {
  transition(vtos, itos);
  __ set(value, Otos_i);
}


void TemplateTable::lconst(int value) {
  transition(vtos, ltos);
  assert(value >= 0, "check this code");
#ifdef _LP64
  __ set(value, Otos_l);
#else
  __ set(value, Otos_l2);
  __ clr( Otos_l1);
#endif
}


void TemplateTable::fconst(int value) {
  transition(vtos, ftos);
  static float zero = 0.0, one = 1.0, two = 2.0;
  float* p;
  switch( value ) {
   default: ShouldNotReachHere();
   case 0:  p = &zero;  break;
   case 1:  p = &one;   break;
   case 2:  p = &two;   break;
  }
  AddressLiteral a(p);
  __ sethi(a, G3_scratch);
  __ ldf(FloatRegisterImpl::S, G3_scratch, a.low10(), Ftos_f);
}


void TemplateTable::dconst(int value) {
  transition(vtos, dtos);
  static double zero = 0.0, one = 1.0;
  double* p;
  switch( value ) {
   default: ShouldNotReachHere();
   case 0:  p = &zero;  break;
   case 1:  p = &one;   break;
  }
  AddressLiteral a(p);
  __ sethi(a, G3_scratch);
  __ ldf(FloatRegisterImpl::D, G3_scratch, a.low10(), Ftos_d);
}


// %%%%% Should factor most snippet templates across platforms

void TemplateTable::bipush() {
  transition(vtos, itos);
  __ ldsb( at_bcp(1), Otos_i );
}

void TemplateTable::sipush() {
  transition(vtos, itos);
  __ get_2_byte_integer_at_bcp(1, G3_scratch, Otos_i, InterpreterMacroAssembler::Signed);
}

void TemplateTable::ldc(bool wide) {
  transition(vtos, vtos);
  Label call_ldc, notInt, isString, notString, notClass, exit;

  if (wide) {
    __ get_2_byte_integer_at_bcp(1, G3_scratch, O1, InterpreterMacroAssembler::Unsigned);
  } else {
    __ ldub(Lbcp, 1, O1);
  }
  __ get_cpool_and_tags(O0, O2);

  const int base_offset = ConstantPool::header_size() * wordSize;
  const int tags_offset = Array<u1>::base_offset_in_bytes();

  // get type from tags
  __ add(O2, tags_offset, O2);
  __ ldub(O2, O1, O2);

  // unresolved class? If so, must resolve
  __ cmp_and_brx_short(O2, JVM_CONSTANT_UnresolvedClass, Assembler::equal, Assembler::pt, call_ldc);

  // unresolved class in error state
  __ cmp_and_brx_short(O2, JVM_CONSTANT_UnresolvedClassInError, Assembler::equal, Assembler::pn, call_ldc);

  __ cmp(O2, JVM_CONSTANT_Class);  // need to call vm to get java mirror of the class
  __ brx(Assembler::notEqual, true, Assembler::pt, notClass);
  __ delayed()->add(O0, base_offset, O0);

  __ bind(call_ldc);
  __ set(wide, O1);
  call_VM(Otos_i, CAST_FROM_FN_PTR(address, InterpreterRuntime::ldc), O1);
  __ push(atos);
  __ ba_short(exit);

  __ bind(notClass);
  // __ add(O0, base_offset, O0);
  __ sll(O1, LogBytesPerWord, O1);
  __ cmp(O2, JVM_CONSTANT_Integer);
  __ brx(Assembler::notEqual, true, Assembler::pt, notInt);
  __ delayed()->cmp(O2, JVM_CONSTANT_String);
  __ ld(O0, O1, Otos_i);
  __ push(itos);
  __ ba_short(exit);

  __ bind(notInt);
  // __ cmp(O2, JVM_CONSTANT_String);
  __ brx(Assembler::notEqual, true, Assembler::pt, notString);
  __ delayed()->ldf(FloatRegisterImpl::S, O0, O1, Ftos_f);
  __ bind(isString);
  __ stop("string should be rewritten to fast_aldc");
  __ ba_short(exit);

  __ bind(notString);
  // __ ldf(FloatRegisterImpl::S, O0, O1, Ftos_f);
  __ push(ftos);

  __ bind(exit);
}

// Fast path for caching oop constants.
// %%% We should use this to handle Class and String constants also.
// %%% It will simplify the ldc/primitive path considerably.
void TemplateTable::fast_aldc(bool wide) {
  transition(vtos, atos);

  int index_size = wide ? sizeof(u2) : sizeof(u1);
  Label resolved;

  // We are resolved if the resolved reference cache entry contains a
  // non-null object (CallSite, etc.)
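  // (A note on the mechanism, hedged: the resolved references live in an
  // oop array hanging off the ConstantPool, so the load below indexes that
  // array rather than the pool itself; a null element simply means "not
  // resolved yet" and falls through to the runtime call.)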
  assert_different_registers(Otos_i, G3_scratch);
  __ get_cache_index_at_bcp(Otos_i, G3_scratch, 1, index_size);  // load index => G3_scratch
  __ load_resolved_reference_at_index(Otos_i, G3_scratch);
  __ tst(Otos_i);
  __ br(Assembler::notEqual, false, Assembler::pt, resolved);
  __ delayed()->set((int)bytecode(), O1);

  address entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_ldc);

  // first time invocation - must resolve first
  __ call_VM(Otos_i, entry, O1);
  __ bind(resolved);
  __ verify_oop(Otos_i);
}


void TemplateTable::ldc2_w() {
  transition(vtos, vtos);
  Label Long, exit;

  __ get_2_byte_integer_at_bcp(1, G3_scratch, O1, InterpreterMacroAssembler::Unsigned);
  __ get_cpool_and_tags(O0, O2);

  const int base_offset = ConstantPool::header_size() * wordSize;
  const int tags_offset = Array<u1>::base_offset_in_bytes();
  // get type from tags
  __ add(O2, tags_offset, O2);
  __ ldub(O2, O1, O2);

  __ sll(O1, LogBytesPerWord, O1);
  __ add(O0, O1, G3_scratch);

  __ cmp_and_brx_short(O2, JVM_CONSTANT_Double, Assembler::notEqual, Assembler::pt, Long);
  // A double can be placed at word-aligned locations in the constant pool.
  // Check out Conversions.java for an example.
  // Also ConstantPool::header_size() is 20, which makes it very difficult
  // to double-align double on the constant pool. SG, 11/7/97
#ifdef _LP64
  __ ldf(FloatRegisterImpl::D, G3_scratch, base_offset, Ftos_d);
#else
  FloatRegister f = Ftos_d;
  __ ldf(FloatRegisterImpl::S, G3_scratch, base_offset, f);
  __ ldf(FloatRegisterImpl::S, G3_scratch, base_offset + sizeof(jdouble)/2,
         f->successor());
#endif
  __ push(dtos);
  __ ba_short(exit);

  __ bind(Long);
#ifdef _LP64
  __ ldx(G3_scratch, base_offset, Otos_l);
#else
  __ ld(G3_scratch, base_offset, Otos_l);
  __ ld(G3_scratch, base_offset + sizeof(jlong)/2, Otos_l->successor());
#endif
  __ push(ltos);

  __ bind(exit);
}


void TemplateTable::locals_index(Register reg, int offset) {
  __ ldub( at_bcp(offset), reg );
}


void TemplateTable::locals_index_wide(Register reg) {
  // offset is 2, not 1, because Lbcp points to wide prefix code
  __ get_2_byte_integer_at_bcp(2, G4_scratch, reg, InterpreterMacroAssembler::Unsigned);
}

void TemplateTable::iload() {
  transition(vtos, itos);
  // Rewrite iload,iload  pair into fast_iload2
  //         iload,caload pair into fast_icaload
  if (RewriteFrequentPairs) {
    Label rewrite, done;

    // get next byte
    __ ldub(at_bcp(Bytecodes::length_for(Bytecodes::_iload)), G3_scratch);

    // if _iload, wait to rewrite to iload2. We only want to rewrite the
    // last two iloads in a pair. Comparing against fast_iload means that
    // the next bytecode is neither an iload nor a caload, and therefore
    // an iload pair.
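    // (Worked example, illustrative only: for a stream like
    //    iload 4; iload 5; iadd
    // the first iload sees another iload ahead and is left alone; the
    // second sees iadd and is rewritten to _fast_iload; on a later pass the
    // first one sees _fast_iload ahead and becomes _fast_iload2, whose
    // template loads both locals.)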
    __ cmp_and_br_short(G3_scratch, (int)Bytecodes::_iload, Assembler::equal, Assembler::pn, done);

    __ cmp(G3_scratch, (int)Bytecodes::_fast_iload);
    __ br(Assembler::equal, false, Assembler::pn, rewrite);
    __ delayed()->set(Bytecodes::_fast_iload2, G4_scratch);

    __ cmp(G3_scratch, (int)Bytecodes::_caload);
    __ br(Assembler::equal, false, Assembler::pn, rewrite);
    __ delayed()->set(Bytecodes::_fast_icaload, G4_scratch);

    __ set(Bytecodes::_fast_iload, G4_scratch);  // don't check again
    // rewrite
    // G4_scratch: fast bytecode
    __ bind(rewrite);
    patch_bytecode(Bytecodes::_iload, G4_scratch, G3_scratch, false);
    __ bind(done);
  }

  // Get the local value into tos
  locals_index(G3_scratch);
  __ access_local_int( G3_scratch, Otos_i );
}

void TemplateTable::fast_iload2() {
  transition(vtos, itos);
  locals_index(G3_scratch);
  __ access_local_int( G3_scratch, Otos_i );
  __ push_i();
  locals_index(G3_scratch, 3);  // get next bytecode's local index.
  __ access_local_int( G3_scratch, Otos_i );
}

void TemplateTable::fast_iload() {
  transition(vtos, itos);
  locals_index(G3_scratch);
  __ access_local_int( G3_scratch, Otos_i );
}

void TemplateTable::lload() {
  transition(vtos, ltos);
  locals_index(G3_scratch);
  __ access_local_long( G3_scratch, Otos_l );
}


void TemplateTable::fload() {
  transition(vtos, ftos);
  locals_index(G3_scratch);
  __ access_local_float( G3_scratch, Ftos_f );
}


void TemplateTable::dload() {
  transition(vtos, dtos);
  locals_index(G3_scratch);
  __ access_local_double( G3_scratch, Ftos_d );
}


void TemplateTable::aload() {
  transition(vtos, atos);
  locals_index(G3_scratch);
  __ access_local_ptr( G3_scratch, Otos_i);
}


void TemplateTable::wide_iload() {
  transition(vtos, itos);
  locals_index_wide(G3_scratch);
  __ access_local_int( G3_scratch, Otos_i );
}


void TemplateTable::wide_lload() {
  transition(vtos, ltos);
  locals_index_wide(G3_scratch);
  __ access_local_long( G3_scratch, Otos_l );
}


void TemplateTable::wide_fload() {
  transition(vtos, ftos);
  locals_index_wide(G3_scratch);
  __ access_local_float( G3_scratch, Ftos_f );
}


void TemplateTable::wide_dload() {
  transition(vtos, dtos);
  locals_index_wide(G3_scratch);
  __ access_local_double( G3_scratch, Ftos_d );
}


void TemplateTable::wide_aload() {
  transition(vtos, atos);
  locals_index_wide(G3_scratch);
  __ access_local_ptr( G3_scratch, Otos_i );
  __ verify_oop(Otos_i);
}


void TemplateTable::iaload() {
  transition(itos, itos);
  // Otos_i: index
  // tos: array
  __ index_check(O2, Otos_i, LogBytesPerInt, G3_scratch, O3);
  __ ld(O3, arrayOopDesc::base_offset_in_bytes(T_INT), Otos_i);
}


void TemplateTable::laload() {
  transition(itos, ltos);
  // Otos_i: index
  // O2: array
  __ index_check(O2, Otos_i, LogBytesPerLong, G3_scratch, O3);
  __ ld_long(O3, arrayOopDesc::base_offset_in_bytes(T_LONG), Otos_l);
}


void TemplateTable::faload() {
  transition(itos, ftos);
  // Otos_i: index
  // O2: array
  __ index_check(O2, Otos_i, LogBytesPerInt, G3_scratch, O3);
  __ ldf(FloatRegisterImpl::S, O3, arrayOopDesc::base_offset_in_bytes(T_FLOAT), Ftos_f);
}


void TemplateTable::daload() {
  transition(itos, dtos);
  // Otos_i: index
  // O2: array
  __ index_check(O2, Otos_i, LogBytesPerLong, G3_scratch, O3);
  __ ldf(FloatRegisterImpl::D, O3, arrayOopDesc::base_offset_in_bytes(T_DOUBLE), Ftos_d);
}


void TemplateTable::aaload() {
  transition(itos, atos);
  // Otos_i: index
  // tos: array
  __ index_check(O2, Otos_i, UseCompressedOops ? 2 : LogBytesPerWord, G3_scratch, O3);
  __ load_heap_oop(O3, arrayOopDesc::base_offset_in_bytes(T_OBJECT), Otos_i);
  __ verify_oop(Otos_i);
}


void TemplateTable::baload() {
  transition(itos, itos);
  // Otos_i: index
  // tos: array
  __ index_check(O2, Otos_i, 0, G3_scratch, O3);
  __ ldsb(O3, arrayOopDesc::base_offset_in_bytes(T_BYTE), Otos_i);
}


void TemplateTable::caload() {
  transition(itos, itos);
  // Otos_i: index
  // tos: array
  __ index_check(O2, Otos_i, LogBytesPerShort, G3_scratch, O3);
  __ lduh(O3, arrayOopDesc::base_offset_in_bytes(T_CHAR), Otos_i);
}

void TemplateTable::fast_icaload() {
  transition(vtos, itos);
  // Otos_i: index
  // tos: array
  locals_index(G3_scratch);
  __ access_local_int( G3_scratch, Otos_i );
  __ index_check(O2, Otos_i, LogBytesPerShort, G3_scratch, O3);
  __ lduh(O3, arrayOopDesc::base_offset_in_bytes(T_CHAR), Otos_i);
}


void TemplateTable::saload() {
  transition(itos, itos);
  // Otos_i: index
  // tos: array
  __ index_check(O2, Otos_i, LogBytesPerShort, G3_scratch, O3);
  __ ldsh(O3, arrayOopDesc::base_offset_in_bytes(T_SHORT), Otos_i);
}


void TemplateTable::iload(int n) {
  transition(vtos, itos);
  __ ld( Llocals, Interpreter::local_offset_in_bytes(n), Otos_i );
}


void TemplateTable::lload(int n) {
  transition(vtos, ltos);
  assert(n+1 < Argument::n_register_parameters, "would need more code");
  __ load_unaligned_long(Llocals, Interpreter::local_offset_in_bytes(n+1), Otos_l);
}


void TemplateTable::fload(int n) {
  transition(vtos, ftos);
  assert(n < Argument::n_register_parameters, "would need more code");
  __ ldf( FloatRegisterImpl::S, Llocals, Interpreter::local_offset_in_bytes(n), Ftos_f );
}


void TemplateTable::dload(int n) {
  transition(vtos, dtos);
  FloatRegister dst = Ftos_d;
  __ load_unaligned_double(Llocals, Interpreter::local_offset_in_bytes(n+1), dst);
}


void TemplateTable::aload(int n) {
  transition(vtos, atos);
  __ ld_ptr( Llocals, Interpreter::local_offset_in_bytes(n), Otos_i );
}


void TemplateTable::aload_0() {
  transition(vtos, atos);

  // According to bytecode histograms, the pairs:
  //
  // _aload_0, _fast_igetfield (itos)
  // _aload_0, _fast_agetfield (atos)
  // _aload_0, _fast_fgetfield (ftos)
  //
  // occur frequently. If RewriteFrequentPairs is set, the (slow) _aload_0
  // bytecode checks the next bytecode and then rewrites the current
  // bytecode into a pair bytecode; otherwise it rewrites the current
  // bytecode into _fast_aload_0 that doesn't do the pair check anymore.
  //
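  // (Worked example, illustrative only: a method body beginning
  //    aload_0; getfield #7   // an int field
  // first has the getfield resolved and quickened to _fast_igetfield; on
  // the next execution the aload_0 sees _fast_igetfield ahead and is
  // rewritten to _fast_iaccess_0, which does the receiver load and the
  // field load as a single template.)
  //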
689 // 690 if (RewriteFrequentPairs) { 691 Label rewrite, done; 692 693 // get next byte 694 __ ldub(at_bcp(Bytecodes::length_for(Bytecodes::_aload_0)), G3_scratch); 695 696 // do actual aload_0 697 aload(0); 698 699 // if _getfield then wait with rewrite 700 __ cmp_and_br_short(G3_scratch, (int)Bytecodes::_getfield, Assembler::equal, Assembler::pn, done); 701 702 // if _igetfield then rewrite to _fast_iaccess_0 703 assert(Bytecodes::java_code(Bytecodes::_fast_iaccess_0) == Bytecodes::_aload_0, "adjust fast bytecode def"); 704 __ cmp(G3_scratch, (int)Bytecodes::_fast_igetfield); 705 __ br(Assembler::equal, false, Assembler::pn, rewrite); 706 __ delayed()->set(Bytecodes::_fast_iaccess_0, G4_scratch); 707 708 // if _agetfield then rewrite to _fast_aaccess_0 709 assert(Bytecodes::java_code(Bytecodes::_fast_aaccess_0) == Bytecodes::_aload_0, "adjust fast bytecode def"); 710 __ cmp(G3_scratch, (int)Bytecodes::_fast_agetfield); 711 __ br(Assembler::equal, false, Assembler::pn, rewrite); 712 __ delayed()->set(Bytecodes::_fast_aaccess_0, G4_scratch); 713 714 // if _fgetfield then rewrite to _fast_faccess_0 715 assert(Bytecodes::java_code(Bytecodes::_fast_faccess_0) == Bytecodes::_aload_0, "adjust fast bytecode def"); 716 __ cmp(G3_scratch, (int)Bytecodes::_fast_fgetfield); 717 __ br(Assembler::equal, false, Assembler::pn, rewrite); 718 __ delayed()->set(Bytecodes::_fast_faccess_0, G4_scratch); 719 720 // else rewrite to _fast_aload0 721 assert(Bytecodes::java_code(Bytecodes::_fast_aload_0) == Bytecodes::_aload_0, "adjust fast bytecode def"); 722 __ set(Bytecodes::_fast_aload_0, G4_scratch); 723 724 // rewrite 725 // G4_scratch: fast bytecode 726 __ bind(rewrite); 727 patch_bytecode(Bytecodes::_aload_0, G4_scratch, G3_scratch, false); 728 __ bind(done); 729 } else { 730 aload(0); 731 } 732 } 733 734 735 void TemplateTable::istore() { 736 transition(itos, vtos); 737 locals_index(G3_scratch); 738 __ store_local_int( G3_scratch, Otos_i ); 739 } 740 741 742 void TemplateTable::lstore() { 743 transition(ltos, vtos); 744 locals_index(G3_scratch); 745 __ store_local_long( G3_scratch, Otos_l ); 746 } 747 748 749 void TemplateTable::fstore() { 750 transition(ftos, vtos); 751 locals_index(G3_scratch); 752 __ store_local_float( G3_scratch, Ftos_f ); 753 } 754 755 756 void TemplateTable::dstore() { 757 transition(dtos, vtos); 758 locals_index(G3_scratch); 759 __ store_local_double( G3_scratch, Ftos_d ); 760 } 761 762 763 void TemplateTable::astore() { 764 transition(vtos, vtos); 765 __ load_ptr(0, Otos_i); 766 __ inc(Lesp, Interpreter::stackElementSize); 767 __ verify_oop_or_return_address(Otos_i, G3_scratch); 768 locals_index(G3_scratch); 769 __ store_local_ptr(G3_scratch, Otos_i); 770 } 771 772 773 void TemplateTable::wide_istore() { 774 transition(vtos, vtos); 775 __ pop_i(); 776 locals_index_wide(G3_scratch); 777 __ store_local_int( G3_scratch, Otos_i ); 778 } 779 780 781 void TemplateTable::wide_lstore() { 782 transition(vtos, vtos); 783 __ pop_l(); 784 locals_index_wide(G3_scratch); 785 __ store_local_long( G3_scratch, Otos_l ); 786 } 787 788 789 void TemplateTable::wide_fstore() { 790 transition(vtos, vtos); 791 __ pop_f(); 792 locals_index_wide(G3_scratch); 793 __ store_local_float( G3_scratch, Ftos_f ); 794 } 795 796 797 void TemplateTable::wide_dstore() { 798 transition(vtos, vtos); 799 __ pop_d(); 800 locals_index_wide(G3_scratch); 801 __ store_local_double( G3_scratch, Ftos_d ); 802 } 803 804 805 void TemplateTable::wide_astore() { 806 transition(vtos, vtos); 807 __ load_ptr(0, Otos_i); 808 __ 
  __ inc(Lesp, Interpreter::stackElementSize);
  __ verify_oop_or_return_address(Otos_i, G3_scratch);
  locals_index_wide(G3_scratch);
  __ store_local_ptr(G3_scratch, Otos_i);
}


void TemplateTable::iastore() {
  transition(itos, vtos);
  __ pop_i(O2);  // index
  // Otos_i: val
  // O3: array
  __ index_check(O3, O2, LogBytesPerInt, G3_scratch, O2);
  __ st(Otos_i, O2, arrayOopDesc::base_offset_in_bytes(T_INT));
}


void TemplateTable::lastore() {
  transition(ltos, vtos);
  __ pop_i(O2);  // index
  // Otos_l: val
  // O3: array
  __ index_check(O3, O2, LogBytesPerLong, G3_scratch, O2);
  __ st_long(Otos_l, O2, arrayOopDesc::base_offset_in_bytes(T_LONG));
}


void TemplateTable::fastore() {
  transition(ftos, vtos);
  __ pop_i(O2);  // index
  // Ftos_f: val
  // O3: array
  __ index_check(O3, O2, LogBytesPerInt, G3_scratch, O2);
  __ stf(FloatRegisterImpl::S, Ftos_f, O2, arrayOopDesc::base_offset_in_bytes(T_FLOAT));
}


void TemplateTable::dastore() {
  transition(dtos, vtos);
  __ pop_i(O2);  // index
  // Ftos_d: val
  // O3: array
  __ index_check(O3, O2, LogBytesPerLong, G3_scratch, O2);
  __ stf(FloatRegisterImpl::D, Ftos_d, O2, arrayOopDesc::base_offset_in_bytes(T_DOUBLE));
}


void TemplateTable::aastore() {
  Label store_ok, is_null, done;
  transition(vtos, vtos);
  __ ld_ptr(Lesp, Interpreter::expr_offset_in_bytes(0), Otos_i);
  __ ld(Lesp, Interpreter::expr_offset_in_bytes(1), O2);      // get index
  __ ld_ptr(Lesp, Interpreter::expr_offset_in_bytes(2), O3);  // get array
  // Otos_i: val
  // O2: index
  // O3: array
  __ verify_oop(Otos_i);
  __ index_check_without_pop(O3, O2, UseCompressedOops ? 2 : LogBytesPerWord, G3_scratch, O1);

  // do array store check - check for NULL value first
  __ br_null_short( Otos_i, Assembler::pn, is_null );

  __ load_klass(O3, O4);     // get array klass
  __ load_klass(Otos_i, O5); // get value klass

  // do fast instanceof cache test

  __ ld_ptr(O4, in_bytes(ObjArrayKlass::element_klass_offset()), O4);

  assert(Otos_i == O0, "just checking");

  // Otos_i:    value
  // O1:        addr - offset
  // O2:        index
  // O3:        array
  // O4:        array element klass
  // O5:        value klass

  // Address element(O1, 0, arrayOopDesc::base_offset_in_bytes(T_OBJECT));

  // Generate a fast subtype check. Branch to store_ok if no
  // failure. Throw if failure.
  __ gen_subtype_check( O5, O4, G3_scratch, G4_scratch, G1_scratch, store_ok );

  // Not a subtype; so must throw exception
  __ throw_if_not_x( Assembler::never, Interpreter::_throw_ArrayStoreException_entry, G3_scratch );

  // Store is OK.
  __ bind(store_ok);
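  // (Aside, based on the helper at the top of this file: "precise" is true
  // here because for arrays the card-table / G1 post barrier must be applied
  // to the exact element address, not merely somewhere inside the object.)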
  do_oop_store(_masm, O1, noreg, arrayOopDesc::base_offset_in_bytes(T_OBJECT), Otos_i, G3_scratch, _bs->kind(), true);

  __ ba(done);
  __ delayed()->inc(Lesp, 3 * Interpreter::stackElementSize);  // adj sp (pops array, index and value)

  __ bind(is_null);
  do_oop_store(_masm, O1, noreg, arrayOopDesc::base_offset_in_bytes(T_OBJECT), G0, G4_scratch, _bs->kind(), true);

  __ profile_null_seen(G3_scratch);
  __ inc(Lesp, 3 * Interpreter::stackElementSize);  // adj sp (pops array, index and value)
  __ bind(done);
}


void TemplateTable::bastore() {
  transition(itos, vtos);
  __ pop_i(O2);  // index
  // Otos_i: val
  // O3: array
  __ index_check(O3, O2, 0, G3_scratch, O2);
  __ stb(Otos_i, O2, arrayOopDesc::base_offset_in_bytes(T_BYTE));
}


void TemplateTable::castore() {
  transition(itos, vtos);
  __ pop_i(O2);  // index
  // Otos_i: val
  // O3: array
  __ index_check(O3, O2, LogBytesPerShort, G3_scratch, O2);
  __ sth(Otos_i, O2, arrayOopDesc::base_offset_in_bytes(T_CHAR));
}


void TemplateTable::sastore() {
  // %%%%% Factor across platform
  castore();
}


void TemplateTable::istore(int n) {
  transition(itos, vtos);
  __ st(Otos_i, Llocals, Interpreter::local_offset_in_bytes(n));
}


void TemplateTable::lstore(int n) {
  transition(ltos, vtos);
  assert(n+1 < Argument::n_register_parameters, "only handle register cases");
  __ store_unaligned_long(Otos_l, Llocals, Interpreter::local_offset_in_bytes(n+1));
}


void TemplateTable::fstore(int n) {
  transition(ftos, vtos);
  assert(n < Argument::n_register_parameters, "only handle register cases");
  __ stf(FloatRegisterImpl::S, Ftos_f, Llocals, Interpreter::local_offset_in_bytes(n));
}


void TemplateTable::dstore(int n) {
  transition(dtos, vtos);
  FloatRegister src = Ftos_d;
  __ store_unaligned_double(src, Llocals, Interpreter::local_offset_in_bytes(n+1));
}


void TemplateTable::astore(int n) {
  transition(vtos, vtos);
  __ load_ptr(0, Otos_i);
  __ inc(Lesp, Interpreter::stackElementSize);
  __ verify_oop_or_return_address(Otos_i, G3_scratch);
  __ store_local_ptr(n, Otos_i);
}


void TemplateTable::pop() {
  transition(vtos, vtos);
  __ inc(Lesp, Interpreter::stackElementSize);
}


void TemplateTable::pop2() {
  transition(vtos, vtos);
  __ inc(Lesp, 2 * Interpreter::stackElementSize);
}


void TemplateTable::dup() {
  transition(vtos, vtos);
  // stack: ..., a
  // load a and tag
  __ load_ptr(0, Otos_i);
  __ push_ptr(Otos_i);
  // stack: ..., a, a
}


void TemplateTable::dup_x1() {
  transition(vtos, vtos);
  // stack: ..., a, b
  __ load_ptr( 1, G3_scratch);  // get a
  __ load_ptr( 0, Otos_l1);     // get b
  __ store_ptr(1, Otos_l1);     // put b
  __ store_ptr(0, G3_scratch);  // put a - like swap
  __ push_ptr(Otos_l1);         // push b
  // stack: ..., b, a, b
}


void TemplateTable::dup_x2() {
  transition(vtos, vtos);
  // stack: ..., a, b, c
  // get c and push on stack, reuse registers
  __ load_ptr( 0, G3_scratch);  // get c
  __ push_ptr(G3_scratch);      // push c with tag
  // stack: ..., a, b, c, c  (c in reg)  (Lesp - 4)
  // (stack offsets n+1 now)
  __ load_ptr( 3, Otos_l1);     // get a
  __ store_ptr(3, G3_scratch);  // put c at 3
  // stack: ..., c, b, c, c  (a in reg)
  __ load_ptr( 2, G3_scratch);  // get b
  __ store_ptr(2, Otos_l1);     // put a at 2
  // stack: ..., c, a, c, c  (b in reg)
  __ store_ptr(1, G3_scratch);  // put b at 1
  // stack: ..., c, a, b, c
}


void TemplateTable::dup2() {
  transition(vtos, vtos);
  __ load_ptr(1, G3_scratch);  // get a
  __ load_ptr(0, Otos_l1);     // get b
  __ push_ptr(G3_scratch);     // push a
  __ push_ptr(Otos_l1);        // push b
  // stack: ..., a, b, a, b
}


void TemplateTable::dup2_x1() {
  transition(vtos, vtos);
  // stack: ..., a, b, c
  __ load_ptr( 1, Lscratch);    // get b
  __ load_ptr( 2, Otos_l1);     // get a
  __ store_ptr(2, Lscratch);    // put b at a
  // stack: ..., b, b, c
  __ load_ptr( 0, G3_scratch);  // get c
  __ store_ptr(1, G3_scratch);  // put c at b
  // stack: ..., b, c, c
  __ store_ptr(0, Otos_l1);     // put a at c
  // stack: ..., b, c, a
  __ push_ptr(Lscratch);        // push b
  __ push_ptr(G3_scratch);      // push c
  // stack: ..., b, c, a, b, c
}


// The spec says that these types can be a mixture of category 1 (1 word)
// types and/or category 2 types (longs and doubles)
void TemplateTable::dup2_x2() {
  transition(vtos, vtos);
  // stack: ..., a, b, c, d
  __ load_ptr( 1, Lscratch);    // get c
  __ load_ptr( 3, Otos_l1);     // get a
  __ store_ptr(3, Lscratch);    // put c at 3
  __ store_ptr(1, Otos_l1);     // put a at 1
  // stack: ..., c, b, a, d
  __ load_ptr( 2, G3_scratch);  // get b
  __ load_ptr( 0, Otos_l1);     // get d
  __ store_ptr(0, G3_scratch);  // put b at 0
  __ store_ptr(2, Otos_l1);     // put d at 2
  // stack: ..., c, d, a, b
  __ push_ptr(Lscratch);        // push c
  __ push_ptr(Otos_l1);         // push d
  // stack: ..., c, d, a, b, c, d
}


void TemplateTable::swap() {
  transition(vtos, vtos);
  // stack: ..., a, b
  __ load_ptr( 1, G3_scratch);  // get a
  __ load_ptr( 0, Otos_l1);     // get b
  __ store_ptr(0, G3_scratch);  // put b
  __ store_ptr(1, Otos_l1);     // put a
  // stack: ..., b, a
}


void TemplateTable::iop2(Operation op) {
  transition(itos, itos);
  __ pop_i(O1);
  switch (op) {
   case  add:  __ add(O1, Otos_i, Otos_i);  break;
   case  sub:  __ sub(O1, Otos_i, Otos_i);  break;
     // %%%%% Mul may not exist: better to call .mul?
   case  mul:  __ smul(O1, Otos_i, Otos_i);  break;
   case _and:  __ and3(O1, Otos_i, Otos_i);  break;
   case  _or:  __ or3(O1, Otos_i, Otos_i);   break;
   case _xor:  __ xor3(O1, Otos_i, Otos_i);  break;
   case  shl:  __ sll(O1, Otos_i, Otos_i);   break;
   case  shr:  __ sra(O1, Otos_i, Otos_i);   break;
   case ushr:  __ srl(O1, Otos_i, Otos_i);   break;
   default: ShouldNotReachHere();
  }
}


void TemplateTable::lop2(Operation op) {
  transition(ltos, ltos);
  __ pop_l(O2);
  switch (op) {
#ifdef _LP64
   case  add:  __ add(O2, Otos_l, Otos_l);   break;
   case  sub:  __ sub(O2, Otos_l, Otos_l);   break;
   case _and:  __ and3(O2, Otos_l, Otos_l);  break;
   case  _or:  __ or3(O2, Otos_l, Otos_l);   break;
   case _xor:  __ xor3(O2, Otos_l, Otos_l);  break;
#else
   case  add:  __ addcc(O3, Otos_l2, Otos_l2);  __ addc(O2, Otos_l1, Otos_l1);  break;
   case  sub:  __ subcc(O3, Otos_l2, Otos_l2);  __ subc(O2, Otos_l1, Otos_l1);  break;
   case _and:  __ and3(O3, Otos_l2, Otos_l2);   __ and3(O2, Otos_l1, Otos_l1);  break;
   case  _or:  __ or3(O3, Otos_l2, Otos_l2);    __ or3(O2, Otos_l1, Otos_l1);   break;
   case _xor:  __ xor3(O3, Otos_l2, Otos_l2);   __ xor3(O2, Otos_l1, Otos_l1);  break;
#endif
   default: ShouldNotReachHere();
  }
}


void TemplateTable::idiv() {
  // %%%%% Later: ForSPARC/V7 call .sdiv library routine,
  // %%%%% Use ldsw...sdivx on pure V9 ABI. 64 bit safe.

  transition(itos, itos);
  __ pop_i(O1);  // get 1st op

  // Y contains upper 32 bits of result, set it to 0 or all ones
  __ wry(G0);
  __ mov(~0, G3_scratch);

  __ tst(O1);
  Label neg;
  __ br(Assembler::negative, true, Assembler::pn, neg);
  __ delayed()->wry(G3_scratch);
  __ bind(neg);

  Label ok;
  __ tst(Otos_i);
  __ throw_if_not_icc( Assembler::notZero, Interpreter::_throw_ArithmeticException_entry, G3_scratch );

  const int min_int = 0x80000000;
  Label regular;
  __ cmp(Otos_i, -1);
  __ br(Assembler::notEqual, false, Assembler::pt, regular);
#ifdef _LP64
  // Don't put set in delay slot
  // Set will turn into multiple instructions in 64 bit mode
  __ delayed()->nop();
  __ set(min_int, G4_scratch);
#else
  __ delayed()->set(min_int, G4_scratch);
#endif
  Label done;
  __ cmp(O1, G4_scratch);
  __ br(Assembler::equal, true, Assembler::pt, done);
  __ delayed()->mov(O1, Otos_i);  // (mov only executed if branch taken)

  __ bind(regular);
  __ sdiv(O1, Otos_i, Otos_i);  // note: irem uses O1 after this instruction!
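  // (The special casing above reflects two's-complement arithmetic:
  // min_int / -1 would overflow, so the Java-defined result is the dividend
  // itself, delivered by the early exit to "done" with O1 moved into
  // Otos_i rather than going through sdiv.)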
  __ bind(done);
}


void TemplateTable::irem() {
  transition(itos, itos);
  __ mov(Otos_i, O2);  // save divisor
  idiv();              // %%%% Hack: exploits fact that idiv leaves dividend in O1
  __ smul(Otos_i, O2, Otos_i);
  __ sub(O1, Otos_i, Otos_i);
}


void TemplateTable::lmul() {
  transition(ltos, ltos);
  __ pop_l(O2);
#ifdef _LP64
  __ mulx(Otos_l, O2, Otos_l);
#else
  __ call_VM_leaf(Lscratch, CAST_FROM_FN_PTR(address, SharedRuntime::lmul));
#endif
}


void TemplateTable::ldiv() {
  transition(ltos, ltos);

  // check for zero
  __ pop_l(O2);
#ifdef _LP64
  __ tst(Otos_l);
  __ throw_if_not_xcc( Assembler::notZero, Interpreter::_throw_ArithmeticException_entry, G3_scratch);
  __ sdivx(O2, Otos_l, Otos_l);
#else
  __ orcc(Otos_l1, Otos_l2, G0);
  __ throw_if_not_icc( Assembler::notZero, Interpreter::_throw_ArithmeticException_entry, G3_scratch);
  __ call_VM_leaf(Lscratch, CAST_FROM_FN_PTR(address, SharedRuntime::ldiv));
#endif
}


void TemplateTable::lrem() {
  transition(ltos, ltos);

  // check for zero
  __ pop_l(O2);
#ifdef _LP64
  __ tst(Otos_l);
  __ throw_if_not_xcc( Assembler::notZero, Interpreter::_throw_ArithmeticException_entry, G3_scratch);
  __ sdivx(O2, Otos_l, Otos_l2);
  __ mulx (Otos_l2, Otos_l, Otos_l2);
  __ sub  (O2, Otos_l2, Otos_l);
#else
  __ orcc(Otos_l1, Otos_l2, G0);
  __ throw_if_not_icc(Assembler::notZero, Interpreter::_throw_ArithmeticException_entry, G3_scratch);
  __ call_VM_leaf(Lscratch, CAST_FROM_FN_PTR(address, SharedRuntime::lrem));
#endif
}


void TemplateTable::lshl() {
  transition(itos, ltos);  // %%%% could optimize, fill delay slot or opt for ultra

  __ pop_l(O2);  // shift value in O2, O3
#ifdef _LP64
  __ sllx(O2, Otos_i, Otos_l);
#else
  __ lshl(O2, O3, Otos_i, Otos_l1, Otos_l2, O4);
#endif
}


void TemplateTable::lshr() {
  transition(itos, ltos);  // %%%% see lshl comment

  __ pop_l(O2);  // shift value in O2, O3
#ifdef _LP64
  __ srax(O2, Otos_i, Otos_l);
#else
  __ lshr(O2, O3, Otos_i, Otos_l1, Otos_l2, O4);
#endif
}



void TemplateTable::lushr() {
  transition(itos, ltos);  // %%%% see lshl comment

  __ pop_l(O2);  // shift value in O2, O3
#ifdef _LP64
  __ srlx(O2, Otos_i, Otos_l);
#else
  __ lushr(O2, O3, Otos_i, Otos_l1, Otos_l2, O4);
#endif
}


void TemplateTable::fop2(Operation op) {
  transition(ftos, ftos);
  switch (op) {
   case  add:  __ pop_f(F4); __ fadd(FloatRegisterImpl::S, F4, Ftos_f, Ftos_f);  break;
   case  sub:  __ pop_f(F4); __ fsub(FloatRegisterImpl::S, F4, Ftos_f, Ftos_f);  break;
   case  mul:  __ pop_f(F4); __ fmul(FloatRegisterImpl::S, F4, Ftos_f, Ftos_f);  break;
   case  div:  __ pop_f(F4); __ fdiv(FloatRegisterImpl::S, F4, Ftos_f, Ftos_f);  break;
   case  rem:
     assert(Ftos_f == F0, "just checking");
#ifdef _LP64
     // LP64 calling conventions use F1, F3 for passing 2 floats
     __ pop_f(F1);
     __ fmov(FloatRegisterImpl::S, Ftos_f, F3);
#else
     __ pop_i(O0);
     __ stf(FloatRegisterImpl::S, Ftos_f, __ d_tmp);
     __ ld( __ d_tmp, O1 );
#endif
     __ call_VM_leaf(Lscratch, CAST_FROM_FN_PTR(address, SharedRuntime::frem));
     assert( Ftos_f == F0, "fix this code" );
     break;

   default:
     ShouldNotReachHere();
  }
}


void TemplateTable::dop2(Operation op) {
  transition(dtos, dtos);
  switch (op) {
   case  add:  __ pop_d(F4); __ fadd(FloatRegisterImpl::D, F4, Ftos_d, Ftos_d);  break;
   case  sub:  __ pop_d(F4); __ fsub(FloatRegisterImpl::D, F4, Ftos_d, Ftos_d);  break;
   case  mul:  __ pop_d(F4); __ fmul(FloatRegisterImpl::D, F4, Ftos_d, Ftos_d);  break;
   case  div:  __ pop_d(F4); __ fdiv(FloatRegisterImpl::D, F4, Ftos_d, Ftos_d);  break;
   case  rem:
#ifdef _LP64
     // Pass arguments in D0, D2
     __ fmov(FloatRegisterImpl::D, Ftos_f, F2 );
     __ pop_d( F0 );
#else
     // Pass arguments in O0O1, O2O3
     __ stf(FloatRegisterImpl::D, Ftos_f, __ d_tmp);
     __ ldd( __ d_tmp, O2 );
     __ pop_d(Ftos_f);
     __ stf(FloatRegisterImpl::D, Ftos_f, __ d_tmp);
     __ ldd( __ d_tmp, O0 );
#endif
     __ call_VM_leaf(Lscratch, CAST_FROM_FN_PTR(address, SharedRuntime::drem));
     assert( Ftos_d == F0, "fix this code" );
     break;

   default: ShouldNotReachHere();
  }
}


void TemplateTable::ineg() {
  transition(itos, itos);
  __ neg(Otos_i);
}


void TemplateTable::lneg() {
  transition(ltos, ltos);
#ifdef _LP64
  __ sub(G0, Otos_l, Otos_l);
#else
  __ lneg(Otos_l1, Otos_l2);
#endif
}


void TemplateTable::fneg() {
  transition(ftos, ftos);
  __ fneg(FloatRegisterImpl::S, Ftos_f, Ftos_f);
}


void TemplateTable::dneg() {
  transition(dtos, dtos);
  __ fneg(FloatRegisterImpl::D, Ftos_f, Ftos_f);
}


void TemplateTable::iinc() {
  transition(vtos, vtos);
  locals_index(G3_scratch);
  __ ldsb(Lbcp, 2, O2);  // load constant
  __ access_local_int(G3_scratch, Otos_i);
  __ add(Otos_i, O2, Otos_i);
  __ st(Otos_i, G3_scratch, 0);  // access_local_int puts E.A. in G3_scratch
}


void TemplateTable::wide_iinc() {
  transition(vtos, vtos);
  locals_index_wide(G3_scratch);
  __ get_2_byte_integer_at_bcp( 4,  O2, O3, InterpreterMacroAssembler::Signed);
  __ access_local_int(G3_scratch, Otos_i);
  __ add(Otos_i, O3, Otos_i);
  __ st(Otos_i, G3_scratch, 0);  // access_local_int puts E.A. in G3_scratch
}


void TemplateTable::convert() {
  // %%%%% Factor this first part across platforms
#ifdef ASSERT
  TosState tos_in  = ilgl;
  TosState tos_out = ilgl;
  switch (bytecode()) {
    case Bytecodes::_i2l: // fall through
    case Bytecodes::_i2f: // fall through
    case Bytecodes::_i2d: // fall through
    case Bytecodes::_i2b: // fall through
    case Bytecodes::_i2c: // fall through
    case Bytecodes::_i2s: tos_in = itos; break;
    case Bytecodes::_l2i: // fall through
    case Bytecodes::_l2f: // fall through
    case Bytecodes::_l2d: tos_in = ltos; break;
    case Bytecodes::_f2i: // fall through
    case Bytecodes::_f2l: // fall through
    case Bytecodes::_f2d: tos_in = ftos; break;
    case Bytecodes::_d2i: // fall through
    case Bytecodes::_d2l: // fall through
    case Bytecodes::_d2f: tos_in = dtos; break;
    default             : ShouldNotReachHere();
  }
  switch (bytecode()) {
    case Bytecodes::_l2i: // fall through
    case Bytecodes::_f2i: // fall through
    case Bytecodes::_d2i: // fall through
    case Bytecodes::_i2b: // fall through
    case Bytecodes::_i2c: // fall through
    case Bytecodes::_i2s: tos_out = itos; break;
    case Bytecodes::_i2l: // fall through
    case Bytecodes::_f2l: // fall through
    case Bytecodes::_d2l: tos_out = ltos; break;
    case Bytecodes::_i2f: // fall through
    case Bytecodes::_l2f: // fall through
    case Bytecodes::_d2f: tos_out = ftos; break;
    case Bytecodes::_i2d: // fall through
    case Bytecodes::_l2d: // fall through
    case Bytecodes::_f2d: tos_out = dtos; break;
    default             : ShouldNotReachHere();
  }
  transition(tos_in, tos_out);
#endif


  // Conversion
  Label done;
  switch (bytecode()) {
   case Bytecodes::_i2l:
#ifdef _LP64
    // Sign extend the 32 bits
    __ sra ( Otos_i, 0, Otos_l );
#else
    __ addcc(Otos_i, 0, Otos_l2);
    __ br(Assembler::greaterEqual, true, Assembler::pt, done);
    __ delayed()->clr(Otos_l1);
    __ set(~0, Otos_l1);
#endif
    break;

   case Bytecodes::_i2f:
    __ st(Otos_i, __ d_tmp );
    __ ldf(FloatRegisterImpl::S,  __ d_tmp, F0);
    __ fitof(FloatRegisterImpl::S, F0, Ftos_f);
    break;

   case Bytecodes::_i2d:
    __ st(Otos_i, __ d_tmp);
    __ ldf(FloatRegisterImpl::S,  __ d_tmp, F0);
    __ fitof(FloatRegisterImpl::D, F0, Ftos_f);
    break;

   case Bytecodes::_i2b:
    __ sll(Otos_i, 24, Otos_i);
    __ sra(Otos_i, 24, Otos_i);
    break;

   case Bytecodes::_i2c:
    __ sll(Otos_i, 16, Otos_i);
    __ srl(Otos_i, 16, Otos_i);
    break;

   case Bytecodes::_i2s:
    __ sll(Otos_i, 16, Otos_i);
    __ sra(Otos_i, 16, Otos_i);
    break;

   case Bytecodes::_l2i:
#ifndef _LP64
    __ mov(Otos_l2, Otos_i);
#else
    // Sign-extend into the high 32 bits
    __ sra(Otos_l, 0, Otos_i);
#endif
    break;

   case Bytecodes::_l2f:
   case Bytecodes::_l2d:
    __ st_long(Otos_l, __ d_tmp);
    __ ldf(FloatRegisterImpl::D, __ d_tmp, Ftos_d);

    if (bytecode() == Bytecodes::_l2f) {
      __ fxtof(FloatRegisterImpl::S, Ftos_d, Ftos_f);
    } else {
      __ fxtof(FloatRegisterImpl::D, Ftos_d, Ftos_d);
    }
    break;

   case Bytecodes::_f2i: {
      Label isNaN;
      // result must be 0 if value is NaN; test by comparing value to itself
      __ fcmp(FloatRegisterImpl::S, Assembler::fcc0, Ftos_f, Ftos_f);
      __ fb(Assembler::f_unordered, true, Assembler::pn, isNaN);
      __ delayed()->clr(Otos_i);  // NaN
      __ ftoi(FloatRegisterImpl::S, Ftos_f, F30);
      __ stf(FloatRegisterImpl::S, F30, __ d_tmp);
      __ ld(__ d_tmp, Otos_i);
      __ bind(isNaN);
    }
    break;

   case Bytecodes::_f2l:
    // must uncache tos
    __ push_f();
#ifdef _LP64
    __ pop_f(F1);
#else
    __ pop_i(O0);
#endif
    __ call_VM_leaf(Lscratch, CAST_FROM_FN_PTR(address, SharedRuntime::f2l));
    break;

   case Bytecodes::_f2d:
    __ ftof( FloatRegisterImpl::S, FloatRegisterImpl::D, Ftos_f, Ftos_f);
    break;

   case Bytecodes::_d2i:
   case Bytecodes::_d2l:
    // must uncache tos
    __ push_d();
#ifdef _LP64
    // LP64 calling conventions pass first double arg in D0
    __ pop_d( Ftos_d );
#else
    __ pop_i( O0 );
    __ pop_i( O1 );
#endif
    __ call_VM_leaf(Lscratch,
        bytecode() == Bytecodes::_d2i
          ? CAST_FROM_FN_PTR(address, SharedRuntime::d2i)
          : CAST_FROM_FN_PTR(address, SharedRuntime::d2l));
    break;

   case Bytecodes::_d2f:
    __ ftof( FloatRegisterImpl::D, FloatRegisterImpl::S, Ftos_d, Ftos_f);
    break;

   default: ShouldNotReachHere();
  }
  __ bind(done);
}


void TemplateTable::lcmp() {
  transition(ltos, itos);

#ifdef _LP64
  __ pop_l(O1);  // pop off value 1, value 2 is in O0
  __ lcmp( O1, Otos_l, Otos_i );
#else
  __ pop_l(O2);  // cmp O2,3 to O0,1
  __ lcmp( O2, O3, Otos_l1, Otos_l2, Otos_i );
#endif
}


void TemplateTable::float_cmp(bool is_float, int unordered_result) {

  if (is_float) __ pop_f(F2);
  else          __ pop_d(F2);

  assert(Ftos_f == F0 && Ftos_d == F0, "alias checking:");

  __ float_cmp( is_float, unordered_result, F2, F0, Otos_i );
}

void TemplateTable::branch(bool is_jsr, bool is_wide) {
  // Note: on SPARC, we use InterpreterMacroAssembler::if_cmp also.
  __ verify_thread();

  const Register O2_bumped_count = O2;
  __ profile_taken_branch(G3_scratch, O2_bumped_count);

  // get (wide) offset to O1_disp
  const Register O1_disp = O1;
  if (is_wide)  __ get_4_byte_integer_at_bcp( 1,  G4_scratch, O1_disp, InterpreterMacroAssembler::set_CC);
  else          __ get_2_byte_integer_at_bcp( 1,  G4_scratch, O1_disp, InterpreterMacroAssembler::Signed, InterpreterMacroAssembler::set_CC);

  // Handle all the JSR stuff here, then exit.
  // It's much shorter and cleaner than intermingling with the
  // non-JSR normal-branch stuff occurring below.
  if( is_jsr ) {
    // compute return address as bci in Otos_i
    __ ld_ptr(Lmethod, Method::const_offset(), G3_scratch);
    __ sub(Lbcp, G3_scratch, G3_scratch);
    __ sub(G3_scratch, in_bytes(ConstMethod::codes_offset()) - (is_wide ? 5 : 3), Otos_i);

    // Bump Lbcp to target of JSR
    __ add(Lbcp, O1_disp, Lbcp);
    // Push returnAddress for "ret" on stack
    __ push_ptr(Otos_i);
    // And away we go!
    __ dispatch_next(vtos);
    return;
  }
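  // (A hedged note on the jsr block above: the pushed returnAddress is a
  // bytecode index rather than a raw bcp, which keeps the stack slot
  // GC-neutral; ret() below reconstructs the bcp from Lmethod plus this bci
  // — see the comment in ret() on why the value is treated as itos.)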

  // Normal (non-jsr) branch handling

  // Save the current Lbcp
  const Register l_cur_bcp = Lscratch;
  __ mov( Lbcp, l_cur_bcp );

  bool increment_invocation_counter_for_backward_branches = UseCompiler && UseLoopCounter;
  if ( increment_invocation_counter_for_backward_branches ) {
    Label Lforward;
    // check branch direction
    __ br( Assembler::positive, false, Assembler::pn, Lforward );
    // Bump bytecode pointer by displacement (take the branch)
    __ delayed()->add( O1_disp, Lbcp, Lbcp );  // add to bc addr

    const Register G3_method_counters = G3_scratch;
    __ get_method_counters(Lmethod, G3_method_counters, Lforward);

    if (TieredCompilation) {
      Label Lno_mdo, Loverflow;
      int increment = InvocationCounter::count_increment;
      if (ProfileInterpreter) {
        // If no method data exists, go to profile_continue.
        __ ld_ptr(Lmethod, Method::method_data_offset(), G4_scratch);
        __ br_null_short(G4_scratch, Assembler::pn, Lno_mdo);

        // Increment backedge counter in the MDO
        Address mdo_backedge_counter(G4_scratch, in_bytes(MethodData::backedge_counter_offset()) +
                                                 in_bytes(InvocationCounter::counter_offset()));
        Address mask(G4_scratch, in_bytes(MethodData::backedge_mask_offset()));
        __ increment_mask_and_jump(mdo_backedge_counter, increment, mask, G3_scratch, O0,
                                   Assembler::notZero, &Lforward);
        __ ba_short(Loverflow);
      }

      // If there's no MDO, increment counter in MethodCounters*
      __ bind(Lno_mdo);
      Address backedge_counter(G3_method_counters,
                               in_bytes(MethodCounters::backedge_counter_offset()) +
                               in_bytes(InvocationCounter::counter_offset()));
      Address mask(G3_method_counters, in_bytes(MethodCounters::backedge_mask_offset()));
      __ increment_mask_and_jump(backedge_counter, increment, mask, G4_scratch, O0,
                                 Assembler::notZero, &Lforward);
      __ bind(Loverflow);

      // notify point for loop, pass branch bytecode
      __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::frequency_counter_overflow), l_cur_bcp);

      // Was an OSR adapter generated?
      // O0 = osr nmethod
      __ br_null_short(O0, Assembler::pn, Lforward);

      // Has the nmethod been invalidated already?
      __ ldub(O0, nmethod::state_offset(), O2);
      __ cmp_and_br_short(O2, nmethod::in_use, Assembler::notEqual, Assembler::pn, Lforward);

      // migrate the interpreter frame off of the stack

      __ mov(G2_thread, L7);
      // save nmethod
      __ mov(O0, L6);
      __ set_last_Java_frame(SP, noreg);
      __ call_VM_leaf(noreg, CAST_FROM_FN_PTR(address, SharedRuntime::OSR_migration_begin), L7);
      __ reset_last_Java_frame();
      __ mov(L7, G2_thread);

      // move OSR nmethod to I1
      __ mov(L6, I1);

      // OSR buffer to I0
      __ mov(O0, I0);

      // remove the interpreter frame
      __ restore(I5_savedSP, 0, SP);

      // Jump to the osr code.
      __ ld_ptr(O1, nmethod::osr_entry_point_offset(), O2);
      __ jmp(O2, G0);
      __ delayed()->nop();

    } else { // not TieredCompilation
      // Update Backedge branch separately from invocations
      const Register G4_invoke_ctr = G4;
      __ increment_backedge_counter(G3_method_counters, G4_invoke_ctr, G1_scratch);
      if (ProfileInterpreter) {
        __ test_invocation_counter_for_mdp(G4_invoke_ctr, G3_method_counters, G1_scratch, Lforward);
        if (UseOnStackReplacement) {
          __ test_backedge_count_for_osr(O2_bumped_count, G3_method_counters, l_cur_bcp, G1_scratch);
        }
      } else {
        if (UseOnStackReplacement) {
          __ test_backedge_count_for_osr(G4_invoke_ctr, G3_method_counters, l_cur_bcp, G1_scratch);
        }
      }
    }

    __ bind(Lforward);
  } else
    // Bump bytecode pointer by displacement (take the branch)
    __ add( O1_disp, Lbcp, Lbcp );  // add to bc addr

  // continue with bytecode @ target
  // %%%%% Like Intel, could speed things up by moving bytecode fetch to code above,
  // %%%%% and changing dispatch_next to dispatch_only
  __ dispatch_next(vtos);
}


// Note Condition in argument is TemplateTable::Condition
// arg scope is within class scope

void TemplateTable::if_0cmp(Condition cc) {
  // no pointers, integer only!
  transition(itos, vtos);
  // assume branch is more often taken than not (loops use backward branches)
  __ cmp( Otos_i, 0);
  __ if_cmp(ccNot(cc), false);
}


void TemplateTable::if_icmp(Condition cc) {
  transition(itos, vtos);
  __ pop_i(O1);
  __ cmp(O1, Otos_i);
  __ if_cmp(ccNot(cc), false);
}


void TemplateTable::if_nullcmp(Condition cc) {
  transition(atos, vtos);
  __ tst(Otos_i);
  __ if_cmp(ccNot(cc), true);
}


void TemplateTable::if_acmp(Condition cc) {
  transition(atos, vtos);
  __ pop_ptr(O1);
  __ verify_oop(O1);
  __ verify_oop(Otos_i);
  __ cmp(O1, Otos_i);
  __ if_cmp(ccNot(cc), true);
}



void TemplateTable::ret() {
  transition(vtos, vtos);
  locals_index(G3_scratch);
  __ access_local_returnAddress(G3_scratch, Otos_i);
  // Otos_i contains the bci, compute the bcp from that

#ifdef _LP64
#ifdef ASSERT
  // jsr result was labeled as an 'itos' not an 'atos' because we cannot GC
  // the result. The return address (really a BCI) was stored with an
  // 'astore' because JVM specs claim it's a pointer-sized thing. Hence in
  // the 64-bit build the 32-bit BCI is actually in the low bits of a 64-bit
  // loaded value.
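  // (The check below is a sanity bound, not an exact limit: since a
  // method's bytecodes fit in the 64K size limit, a legitimate BCI can
  // never exceed 65536, so a larger value indicates the BCI landed in the
  // wrong half of the 64-bit register.)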
  { Label zzz ;
     __ set (65536, G3_scratch) ;
     __ cmp (Otos_i, G3_scratch) ;
     __ bp( Assembler::lessEqualUnsigned, false, Assembler::xcc, Assembler::pn, zzz);
     __ delayed()->nop();
     __ stop("BCI is in the wrong register half?");
     __ bind (zzz) ;
  }
#endif
#endif

  __ profile_ret(vtos, Otos_i, G4_scratch);

  __ ld_ptr(Lmethod, Method::const_offset(), G3_scratch);
  __ add(G3_scratch, Otos_i, G3_scratch);
  __ add(G3_scratch, in_bytes(ConstMethod::codes_offset()), Lbcp);
  __ dispatch_next(vtos);
}


void TemplateTable::wide_ret() {
  transition(vtos, vtos);
  locals_index_wide(G3_scratch);
  __ access_local_returnAddress(G3_scratch, Otos_i);
  // Otos_i contains the bci, compute the bcp from that

  __ profile_ret(vtos, Otos_i, G4_scratch);

  __ ld_ptr(Lmethod, Method::const_offset(), G3_scratch);
  __ add(G3_scratch, Otos_i, G3_scratch);
  __ add(G3_scratch, in_bytes(ConstMethod::codes_offset()), Lbcp);
  __ dispatch_next(vtos);
}


void TemplateTable::tableswitch() {
  transition(itos, vtos);
  Label default_case, continue_execution;

  // align bcp
  __ add(Lbcp, BytesPerInt, O1);
  __ and3(O1, -BytesPerInt, O1);
  // load lo, hi
  __ ld(O1, 1 * BytesPerInt, O2);  // low bound
  __ ld(O1, 2 * BytesPerInt, O3);  // high bound
#ifdef _LP64
  // Sign extend the 32 bits
  __ sra ( Otos_i, 0, Otos_i );
#endif /* _LP64 */

  // check against lo & hi
  __ cmp( Otos_i, O2);
  __ br( Assembler::less, false, Assembler::pn, default_case);
  __ delayed()->cmp( Otos_i, O3 );
  __ br( Assembler::greater, false, Assembler::pn, default_case);
  // lookup dispatch offset
  __ delayed()->sub(Otos_i, O2, O2);
  __ profile_switch_case(O2, O3, G3_scratch, G4_scratch);
  __ sll(O2, LogBytesPerInt, O2);
  __ add(O2, 3 * BytesPerInt, O2);
  __ ba(continue_execution);
  __ delayed()->ld(O1, O2, O2);
  // handle default
  __ bind(default_case);
  __ profile_switch_default(O3);
  __ ld(O1, 0, O2);  // get default offset
  // continue execution
  __ bind(continue_execution);
  __ add(Lbcp, O2, Lbcp);
  __ dispatch_next(vtos);
}


void TemplateTable::lookupswitch() {
  transition(itos, itos);
  __ stop("lookupswitch bytecode should have been rewritten");
}

void TemplateTable::fast_linearswitch() {
  transition(itos, vtos);
  Label loop_entry, loop, found, continue_execution;
  // align bcp
  __ add(Lbcp, BytesPerInt, O1);
  __ and3(O1, -BytesPerInt, O1);
  // set counter
  __ ld(O1, BytesPerInt, O2);
  __ sll(O2, LogBytesPerInt + 1, O2);  // in word-pairs
  __ add(O1, 2 * BytesPerInt, O3);     // set first pair addr
  __ ba(loop_entry);
  __ delayed()->add(O3, O2, O2);       // counter now points past last pair

  // table search
  __ bind(loop);
  __ cmp(O4, Otos_i);
  __ br(Assembler::equal, true, Assembler::pn, found);
  __ delayed()->ld(O3, BytesPerInt, O4);  // offset -> O4
  __ inc(O3, 2 * BytesPerInt);

  __ bind(loop_entry);
  __ cmp(O2, O3);
  __ brx(Assembler::greaterUnsigned, true, Assembler::pt, loop);
  __ delayed()->ld(O3, 0, O4);

  // default case
  __ ld(O1, 0, O4);  // get default offset
  if (ProfileInterpreter) {
    __ profile_switch_default(O3);
    __ ba_short(continue_execution);
  }

  // entry found -> get offset
  __ bind(found);
1857 if (ProfileInterpreter) { 1858 __ sub(O3, O1, O3); 1859 __ sub(O3, 2*BytesPerInt, O3); 1860 __ srl(O3, LogBytesPerInt + 1, O3); // in word-pairs 1861 __ profile_switch_case(O3, O1, O2, G3_scratch); 1862 1863 __ bind(continue_execution); 1864 } 1865 __ add(Lbcp, O4, Lbcp); 1866 __ dispatch_next(vtos); 1867 } 1868 1869 1870 void TemplateTable::fast_binaryswitch() { 1871 transition(itos, vtos); 1872 // Implementation using the following core algorithm: (copied from Intel) 1873 // 1874 // int binary_search(int key, LookupswitchPair* array, int n) { 1875 // // Binary search according to "Methodik des Programmierens" by 1876 // // Edsger W. Dijkstra and W.H.J. Feijen, Addison Wesley Germany 1985. 1877 // int i = 0; 1878 // int j = n; 1879 // while (i+1 < j) { 1880 // // invariant P: 0 <= i < j <= n and (a[i] <= key < a[j] or Q) 1881 // // with Q: for all i: 0 <= i < n: key < a[i] 1882 // // where a stands for the array and assuming that the (nonexistent) 1883 // // element a[n] is infinitely big. 1884 // int h = (i + j) >> 1; 1885 // // i < h < j 1886 // if (key < array[h].fast_match()) { 1887 // j = h; 1888 // } else { 1889 // i = h; 1890 // } 1891 // } 1892 // // R: a[i] <= key < a[i+1] or Q 1893 // // (i.e., if key is within array, i is the correct index) 1894 // return i; 1895 // } 1896 1897 // register allocation 1898 assert(Otos_i == O0, "alias checking"); 1899 const Register Rkey = Otos_i; // already set (tosca) 1900 const Register Rarray = O1; 1901 const Register Ri = O2; 1902 const Register Rj = O3; 1903 const Register Rh = O4; 1904 const Register Rscratch = O5; 1905 1906 const int log_entry_size = 3; 1907 const int entry_size = 1 << log_entry_size; 1908 1909 Label found; 1910 // Find Array start 1911 __ add(Lbcp, 3 * BytesPerInt, Rarray); 1912 __ and3(Rarray, -BytesPerInt, Rarray); 1913 // initialize i & j (in delay slot) 1914 __ clr( Ri ); 1915 1916 // and start 1917 Label entry; 1918 __ ba(entry); 1919 __ delayed()->ld( Rarray, -BytesPerInt, Rj); 1920 // (Rj is already in the native byte-ordering.) 1921 1922 // binary search loop 1923 { Label loop; 1924 __ bind( loop ); 1925 // int h = (i + j) >> 1; 1926 __ sra( Rh, 1, Rh ); 1927 // if (key < array[h].fast_match()) { 1928 // j = h; 1929 // } else { 1930 // i = h; 1931 // } 1932 __ sll( Rh, log_entry_size, Rscratch ); 1933 __ ld( Rarray, Rscratch, Rscratch ); 1934 // (Rscratch is already in the native byte-ordering.) 1935 __ cmp( Rkey, Rscratch ); 1936 __ movcc( Assembler::less, false, Assembler::icc, Rh, Rj ); // j = h if (key < array[h].fast_match()) 1937 __ movcc( Assembler::greaterEqual, false, Assembler::icc, Rh, Ri ); // i = h if (key >= array[h].fast_match()) 1938 1939 // while (i+1 < j) 1940 __ bind( entry ); 1941 __ add( Ri, 1, Rscratch ); 1942 __ cmp(Rscratch, Rj); 1943 __ br( Assembler::less, true, Assembler::pt, loop ); 1944 __ delayed()->add( Ri, Rj, Rh ); // start h = i + j >> 1; 1945 } 1946 1947 // end of binary search, result index is i (must check again!) 1948 Label default_case; 1949 Label continue_execution; 1950 if (ProfileInterpreter) { 1951 __ mov( Ri, Rh ); // Save index in i for profiling 1952 } 1953 __ sll( Ri, log_entry_size, Ri ); 1954 __ ld( Rarray, Ri, Rscratch ); 1955 // (Rscratch is already in the native byte-ordering.)
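// (Added note, not in the original source.) The loop above only establishes
// the postcondition R: a[i] <= key < a[i+1], or key < a[0]; it never tests
// for equality. The compare below therefore re-checks for an exact match
// and otherwise falls through to the default offset loaded in the delay
// slot of the branch.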
1956 __ cmp( Rkey, Rscratch ); 1957 __ br( Assembler::notEqual, true, Assembler::pn, default_case ); 1958 __ delayed()->ld( Rarray, -2 * BytesPerInt, Rj ); // load default offset -> j 1959 1960 // entry found -> j = offset 1961 __ inc( Ri, BytesPerInt ); 1962 __ profile_switch_case(Rh, Rj, Rscratch, Rkey); 1963 __ ld( Rarray, Ri, Rj ); 1964 // (Rj is already in the native byte-ordering.) 1965 1966 if (ProfileInterpreter) { 1967 __ ba_short(continue_execution); 1968 } 1969 1970 __ bind(default_case); // fall through (if not profiling) 1971 __ profile_switch_default(Ri); 1972 1973 __ bind(continue_execution); 1974 __ add( Lbcp, Rj, Lbcp ); 1975 __ dispatch_next( vtos ); 1976 } 1977 1978 1979 void TemplateTable::_return(TosState state) { 1980 transition(state, state); 1981 assert(_desc->calls_vm(), "inconsistent calls_vm information"); 1982 1983 if (_desc->bytecode() == Bytecodes::_return_register_finalizer) { 1984 assert(state == vtos, "only valid state"); 1985 __ mov(G0, G3_scratch); 1986 __ access_local_ptr(G3_scratch, Otos_i); 1987 __ load_klass(Otos_i, O2); 1988 __ set(JVM_ACC_HAS_FINALIZER, G3); 1989 __ ld(O2, in_bytes(Klass::access_flags_offset()), O2); 1990 __ andcc(G3, O2, G0); 1991 Label skip_register_finalizer; 1992 __ br(Assembler::zero, false, Assembler::pn, skip_register_finalizer); 1993 __ delayed()->nop(); 1994 1995 // Call out to do finalizer registration 1996 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::register_finalizer), Otos_i); 1997 1998 __ bind(skip_register_finalizer); 1999 } 2000 2001 __ remove_activation(state, /* throw_monitor_exception */ true); 2002 2003 // The caller's SP was adjusted upon method entry to accommodate 2004 // the callee's non-argument locals. Undo that adjustment. 2005 __ ret(); // return to caller 2006 __ delayed()->restore(I5_savedSP, G0, SP); 2007 } 2008 2009 2010 // ---------------------------------------------------------------------------- 2011 // Volatile variables demand their effects be made known to all CPUs in 2012 // order. Store buffers on most chips allow reads & writes to reorder; the 2013 // JMM's ReadAfterWrite.java test fails in -Xint mode without some kind of 2014 // memory barrier (i.e., it's not sufficient that the interpreter does not 2015 // reorder volatile references, the hardware also must not reorder them). 2016 // 2017 // According to the new Java Memory Model (JMM): 2018 // (1) All volatiles are serialized with respect to each other. 2019 // ALSO reads & writes act as acquire & release, so: 2020 // (2) A read cannot let unrelated NON-volatile memory refs that happen after 2021 // the read float up to before the read. It's OK for non-volatile memory refs 2022 // that happen before the volatile read to float down below it. 2023 // (3) Similarly, a volatile write cannot let unrelated NON-volatile memory refs 2024 // that happen BEFORE the write float down to after the write. It's OK for 2025 // non-volatile memory refs that happen after the volatile write to float up 2026 // before it. 2027 // 2028 // We only put in barriers around volatile refs (they are expensive), not 2029 // _between_ memory refs (that would require us to track the flavor of the 2030 // previous memory refs). Requirements (2) and (3) require some barriers 2031 // before volatile stores and after volatile loads. These nearly cover 2032 // requirement (1) but miss the volatile-store-volatile-load case. This final 2033 // case is placed after volatile-stores although it could just as well go 2034 // before volatile-loads.
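// (Added summary, not part of the original source.) Concretely, the policy
// above shows up in the get/put templates later in this file as:
//
//   volatile load (getfield/getstatic):
//     ld [obj + offset]
//     membar(LoadLoad | LoadStore)      // requirement (2)
//
//   volatile store (putfield/putstatic):
//     membar(LoadStore | StoreStore)    // requirement (3)
//     st [obj + offset]
//     membar(StoreLoad)                 // the store-load half of (1)
//
// On TSO sparc only the trailing StoreLoad has any effect, which is exactly
// what volatile_barrier() below filters for.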
2035 void TemplateTable::volatile_barrier(Assembler::Membar_mask_bits order_constraint) { 2036 // Helper function to insert an is-volatile test and memory barrier 2037 // All current sparc implementations run in TSO, needing only StoreLoad 2038 if ((order_constraint & Assembler::StoreLoad) == 0) return; 2039 __ membar( order_constraint ); 2040 } 2041 2042 // ---------------------------------------------------------------------------- 2043 void TemplateTable::resolve_cache_and_index(int byte_no, 2044 Register Rcache, 2045 Register index, 2046 size_t index_size) { 2047 // Depends on cpCacheOop layout! 2048 Label resolved; 2049 2050 assert(byte_no == f1_byte || byte_no == f2_byte, "byte_no out of range"); 2051 __ get_cache_and_index_and_bytecode_at_bcp(Rcache, index, Lbyte_code, byte_no, 1, index_size); 2052 __ cmp(Lbyte_code, (int) bytecode()); // have we resolved this bytecode? 2053 __ br(Assembler::equal, false, Assembler::pt, resolved); 2054 __ delayed()->set((int)bytecode(), O1); 2055 2056 address entry; 2057 switch (bytecode()) { 2058 case Bytecodes::_getstatic : // fall through 2059 case Bytecodes::_putstatic : // fall through 2060 case Bytecodes::_getfield : // fall through 2061 case Bytecodes::_putfield : entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_get_put); break; 2062 case Bytecodes::_invokevirtual : // fall through 2063 case Bytecodes::_invokespecial : // fall through 2064 case Bytecodes::_invokestatic : // fall through 2065 case Bytecodes::_invokeinterface: entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_invoke); break; 2066 case Bytecodes::_invokehandle : entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_invokehandle); break; 2067 case Bytecodes::_invokedynamic : entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_invokedynamic); break; 2068 default: 2069 fatal(err_msg("unexpected bytecode: %s", Bytecodes::name(bytecode()))); 2070 break; 2071 } 2072 // first time invocation - must resolve first 2073 __ call_VM(noreg, entry, O1); 2074 // Update registers with resolved info 2075 __ get_cache_and_index_at_bcp(Rcache, index, 1, index_size); 2076 __ bind(resolved); 2077 } 2078 2079 void TemplateTable::load_invoke_cp_cache_entry(int byte_no, 2080 Register method, 2081 Register itable_index, 2082 Register flags, 2083 bool is_invokevirtual, 2084 bool is_invokevfinal, 2085 bool is_invokedynamic) { 2086 // Uses both G3_scratch and G4_scratch 2087 Register cache = G3_scratch; 2088 Register index = G4_scratch; 2089 assert_different_registers(cache, method, itable_index); 2090 2091 // determine constant pool cache field offsets 2092 assert(is_invokevirtual == (byte_no == f2_byte), "is_invokevirtual flag redundant"); 2093 const int method_offset = in_bytes( 2094 ConstantPoolCache::base_offset() + 2095 ((byte_no == f2_byte) 2096 ? ConstantPoolCacheEntry::f2_offset() 2097 : ConstantPoolCacheEntry::f1_offset() 2098 ) 2099 ); 2100 const int flags_offset = in_bytes(ConstantPoolCache::base_offset() + 2101 ConstantPoolCacheEntry::flags_offset()); 2102 // access constant pool cache fields 2103 const int index_offset = in_bytes(ConstantPoolCache::base_offset() + 2104 ConstantPoolCacheEntry::f2_offset()); 2105 2106 if (is_invokevfinal) { 2107 __ get_cache_and_index_at_bcp(cache, index, 1); 2108 __ ld_ptr(Address(cache, method_offset), method); 2109 } else { 2110 size_t index_size = (is_invokedynamic ?
sizeof(u4) : sizeof(u2)); 2111 resolve_cache_and_index(byte_no, cache, index, index_size); 2112 __ ld_ptr(Address(cache, method_offset), method); 2113 } 2114 2115 if (itable_index != noreg) { 2116 // pick up itable or appendix index from f2 also: 2117 __ ld_ptr(Address(cache, index_offset), itable_index); 2118 } 2119 __ ld_ptr(Address(cache, flags_offset), flags); 2120 } 2121 2122 // The Rcache register must be set before the call 2123 void TemplateTable::load_field_cp_cache_entry(Register Robj, 2124 Register Rcache, 2125 Register index, 2126 Register Roffset, 2127 Register Rflags, 2128 bool is_static) { 2129 assert_different_registers(Rcache, Rflags, Roffset); 2130 2131 ByteSize cp_base_offset = ConstantPoolCache::base_offset(); 2132 2133 __ ld_ptr(Rcache, cp_base_offset + ConstantPoolCacheEntry::flags_offset(), Rflags); 2134 __ ld_ptr(Rcache, cp_base_offset + ConstantPoolCacheEntry::f2_offset(), Roffset); 2135 if (is_static) { 2136 __ ld_ptr(Rcache, cp_base_offset + ConstantPoolCacheEntry::f1_offset(), Robj); 2137 const int mirror_offset = in_bytes(Klass::java_mirror_offset()); 2138 __ ld_ptr( Robj, mirror_offset, Robj); 2139 } 2140 } 2141 2142 // The registers Rcache and index are expected to be set before the call. 2143 // Correct values of the Rcache and index registers are preserved. 2144 void TemplateTable::jvmti_post_field_access(Register Rcache, 2145 Register index, 2146 bool is_static, 2147 bool has_tos) { 2148 ByteSize cp_base_offset = ConstantPoolCache::base_offset(); 2149 2150 if (JvmtiExport::can_post_field_access()) { 2151 // Check to see if a field access watch has been set before we take 2152 // the time to call into the VM. 2153 Label Label1; 2154 assert_different_registers(Rcache, index, G1_scratch); 2155 AddressLiteral get_field_access_count_addr(JvmtiExport::get_field_access_count_addr()); 2156 __ load_contents(get_field_access_count_addr, G1_scratch); 2157 __ cmp_and_br_short(G1_scratch, 0, Assembler::equal, Assembler::pt, Label1); 2158 2159 __ add(Rcache, in_bytes(cp_base_offset), Rcache); 2160 2161 if (is_static) { 2162 __ clr(Otos_i); 2163 } else { 2164 if (has_tos) { 2165 // save object pointer before call_VM() clobbers it 2166 __ push_ptr(Otos_i); // put object on tos where GC wants it.
2167 } else { 2168 // Load top of stack (do not pop the value off the stack); 2169 __ ld_ptr(Lesp, Interpreter::expr_offset_in_bytes(0), Otos_i); 2170 } 2171 __ verify_oop(Otos_i); 2172 } 2173 // Otos_i: object pointer or NULL if static 2174 // Rcache: cache entry pointer 2175 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_field_access), 2176 Otos_i, Rcache); 2177 if (!is_static && has_tos) { 2178 __ pop_ptr(Otos_i); // restore object pointer 2179 __ verify_oop(Otos_i); 2180 } 2181 __ get_cache_and_index_at_bcp(Rcache, index, 1); 2182 __ bind(Label1); 2183 } 2184 } 2185 2186 void TemplateTable::getfield_or_static(int byte_no, bool is_static) { 2187 transition(vtos, vtos); 2188 2189 Register Rcache = G3_scratch; 2190 Register index = G4_scratch; 2191 Register Rclass = Rcache; 2192 Register Roffset= G4_scratch; 2193 Register Rflags = G1_scratch; 2194 ByteSize cp_base_offset = ConstantPoolCache::base_offset(); 2195 2196 resolve_cache_and_index(byte_no, Rcache, index, sizeof(u2)); 2197 jvmti_post_field_access(Rcache, index, is_static, false); 2198 load_field_cp_cache_entry(Rclass, Rcache, index, Roffset, Rflags, is_static); 2199 2200 if (!is_static) { 2201 pop_and_check_object(Rclass); 2202 } else { 2203 __ verify_oop(Rclass); 2204 } 2205 2206 Label exit; 2207 2208 Assembler::Membar_mask_bits membar_bits = 2209 Assembler::Membar_mask_bits(Assembler::LoadLoad | Assembler::LoadStore); 2210 2211 if (__ membar_has_effect(membar_bits)) { 2212 // Get volatile flag 2213 __ set((1 << ConstantPoolCacheEntry::is_volatile_shift), Lscratch); 2214 __ and3(Rflags, Lscratch, Lscratch); 2215 } 2216 2217 Label checkVolatile; 2218 2219 // compute field type 2220 Label notByte, notInt, notShort, notChar, notLong, notFloat, notObj; 2221 __ srl(Rflags, ConstantPoolCacheEntry::tos_state_shift, Rflags); 2222 // Make sure we don't need to mask Rflags after the above shift 2223 ConstantPoolCacheEntry::verify_tos_state_shift(); 2224 2225 // Check atos before itos for getstatic, more likely (in Queens at least) 2226 __ cmp(Rflags, atos); 2227 __ br(Assembler::notEqual, false, Assembler::pt, notObj); 2228 __ delayed() ->cmp(Rflags, itos); 2229 2230 // atos 2231 __ load_heap_oop(Rclass, Roffset, Otos_i); 2232 __ verify_oop(Otos_i); 2233 __ push(atos); 2234 if (!is_static) { 2235 patch_bytecode(Bytecodes::_fast_agetfield, G3_scratch, G4_scratch); 2236 } 2237 __ ba(checkVolatile); 2238 __ delayed()->tst(Lscratch); 2239 2240 __ bind(notObj); 2241 2242 // cmp(Rflags, itos); 2243 __ br(Assembler::notEqual, false, Assembler::pt, notInt); 2244 __ delayed() ->cmp(Rflags, ltos); 2245 2246 // itos 2247 __ ld(Rclass, Roffset, Otos_i); 2248 __ push(itos); 2249 if (!is_static) { 2250 patch_bytecode(Bytecodes::_fast_igetfield, G3_scratch, G4_scratch); 2251 } 2252 __ ba(checkVolatile); 2253 __ delayed()->tst(Lscratch); 2254 2255 __ bind(notInt); 2256 2257 // cmp(Rflags, ltos); 2258 __ br(Assembler::notEqual, false, Assembler::pt, notLong); 2259 __ delayed() ->cmp(Rflags, btos); 2260 2261 // ltos 2262 // load must be atomic 2263 __ ld_long(Rclass, Roffset, Otos_l); 2264 __ push(ltos); 2265 if (!is_static) { 2266 patch_bytecode(Bytecodes::_fast_lgetfield, G3_scratch, G4_scratch); 2267 } 2268 __ ba(checkVolatile); 2269 __ delayed()->tst(Lscratch); 2270 2271 __ bind(notLong); 2272 2273 // cmp(Rflags, btos); 2274 __ br(Assembler::notEqual, false, Assembler::pt, notByte); 2275 __ delayed() ->cmp(Rflags, ctos); 2276 2277 // btos 2278 __ ldsb(Rclass, Roffset, Otos_i); 2279 __ push(itos); 2280 if (!is_static) { 2281 
patch_bytecode(Bytecodes::_fast_bgetfield, G3_scratch, G4_scratch); 2282 } 2283 __ ba(checkVolatile); 2284 __ delayed()->tst(Lscratch); 2285 2286 __ bind(notByte); 2287 2288 // cmp(Rflags, ctos); 2289 __ br(Assembler::notEqual, false, Assembler::pt, notChar); 2290 __ delayed() ->cmp(Rflags, stos); 2291 2292 // ctos 2293 __ lduh(Rclass, Roffset, Otos_i); 2294 __ push(itos); 2295 if (!is_static) { 2296 patch_bytecode(Bytecodes::_fast_cgetfield, G3_scratch, G4_scratch); 2297 } 2298 __ ba(checkVolatile); 2299 __ delayed()->tst(Lscratch); 2300 2301 __ bind(notChar); 2302 2303 // cmp(Rflags, stos); 2304 __ br(Assembler::notEqual, false, Assembler::pt, notShort); 2305 __ delayed() ->cmp(Rflags, ftos); 2306 2307 // stos 2308 __ ldsh(Rclass, Roffset, Otos_i); 2309 __ push(itos); 2310 if (!is_static) { 2311 patch_bytecode(Bytecodes::_fast_sgetfield, G3_scratch, G4_scratch); 2312 } 2313 __ ba(checkVolatile); 2314 __ delayed()->tst(Lscratch); 2315 2316 __ bind(notShort); 2317 2318 2319 // cmp(Rflags, ftos); 2320 __ br(Assembler::notEqual, false, Assembler::pt, notFloat); 2321 __ delayed() ->tst(Lscratch); 2322 2323 // ftos 2324 __ ldf(FloatRegisterImpl::S, Rclass, Roffset, Ftos_f); 2325 __ push(ftos); 2326 if (!is_static) { 2327 patch_bytecode(Bytecodes::_fast_fgetfield, G3_scratch, G4_scratch); 2328 } 2329 __ ba(checkVolatile); 2330 __ delayed()->tst(Lscratch); 2331 2332 __ bind(notFloat); 2333 2334 2335 // dtos 2336 __ ldf(FloatRegisterImpl::D, Rclass, Roffset, Ftos_d); 2337 __ push(dtos); 2338 if (!is_static) { 2339 patch_bytecode(Bytecodes::_fast_dgetfield, G3_scratch, G4_scratch); 2340 } 2341 2342 __ bind(checkVolatile); 2343 if (__ membar_has_effect(membar_bits)) { 2344 // __ tst(Lscratch); executed in delay slot 2345 __ br(Assembler::zero, false, Assembler::pt, exit); 2346 __ delayed()->nop(); 2347 volatile_barrier(membar_bits); 2348 } 2349 2350 __ bind(exit); 2351 } 2352 2353 2354 void TemplateTable::getfield(int byte_no) { 2355 getfield_or_static(byte_no, false); 2356 } 2357 2358 void TemplateTable::getstatic(int byte_no) { 2359 getfield_or_static(byte_no, true); 2360 } 2361 2362 2363 void TemplateTable::fast_accessfield(TosState state) { 2364 transition(atos, state); 2365 Register Rcache = G3_scratch; 2366 Register index = G4_scratch; 2367 Register Roffset = G4_scratch; 2368 Register Rflags = Rcache; 2369 ByteSize cp_base_offset = ConstantPoolCache::base_offset(); 2370 2371 __ get_cache_and_index_at_bcp(Rcache, index, 1); 2372 jvmti_post_field_access(Rcache, index, /*is_static*/false, /*has_tos*/true); 2373 2374 __ ld_ptr(Rcache, cp_base_offset + ConstantPoolCacheEntry::f2_offset(), Roffset); 2375 2376 __ null_check(Otos_i); 2377 __ verify_oop(Otos_i); 2378 2379 Label exit; 2380 2381 Assembler::Membar_mask_bits membar_bits = 2382 Assembler::Membar_mask_bits(Assembler::LoadLoad | Assembler::LoadStore); 2383 if (__ membar_has_effect(membar_bits)) { 2384 // Get volatile flag 2385 __ ld_ptr(Rcache, cp_base_offset + ConstantPoolCacheEntry::f2_offset(), Rflags); 2386 __ set((1 << ConstantPoolCacheEntry::is_volatile_shift), Lscratch); 2387 } 2388 2389 switch (bytecode()) { 2390 case Bytecodes::_fast_bgetfield: 2391 __ ldsb(Otos_i, Roffset, Otos_i); 2392 break; 2393 case Bytecodes::_fast_cgetfield: 2394 __ lduh(Otos_i, Roffset, Otos_i); 2395 break; 2396 case Bytecodes::_fast_sgetfield: 2397 __ ldsh(Otos_i, Roffset, Otos_i); 2398 break; 2399 case Bytecodes::_fast_igetfield: 2400 __ ld(Otos_i, Roffset, Otos_i); 2401 break; 2402 case Bytecodes::_fast_lgetfield: 2403 __ ld_long(Otos_i, Roffset, Otos_l); 
2404 break; 2405 case Bytecodes::_fast_fgetfield: 2406 __ ldf(FloatRegisterImpl::S, Otos_i, Roffset, Ftos_f); 2407 break; 2408 case Bytecodes::_fast_dgetfield: 2409 __ ldf(FloatRegisterImpl::D, Otos_i, Roffset, Ftos_d); 2410 break; 2411 case Bytecodes::_fast_agetfield: 2412 __ load_heap_oop(Otos_i, Roffset, Otos_i); 2413 break; 2414 default: 2415 ShouldNotReachHere(); 2416 } 2417 2418 if (__ membar_has_effect(membar_bits)) { 2419 __ btst(Lscratch, Rflags); 2420 __ br(Assembler::zero, false, Assembler::pt, exit); 2421 __ delayed()->nop(); 2422 volatile_barrier(membar_bits); 2423 __ bind(exit); 2424 } 2425 2426 if (state == atos) { 2427 __ verify_oop(Otos_i); // does not blow flags! 2428 } 2429 } 2430 2431 void TemplateTable::jvmti_post_fast_field_mod() { 2432 if (JvmtiExport::can_post_field_modification()) { 2433 // Check to see if a field modification watch has been set before we take 2434 // the time to call into the VM. 2435 Label done; 2436 AddressLiteral get_field_modification_count_addr(JvmtiExport::get_field_modification_count_addr()); 2437 __ load_contents(get_field_modification_count_addr, G4_scratch); 2438 __ cmp_and_br_short(G4_scratch, 0, Assembler::equal, Assembler::pt, done); 2439 __ pop_ptr(G4_scratch); // copy the object pointer from tos 2440 __ verify_oop(G4_scratch); 2441 __ push_ptr(G4_scratch); // put the object pointer back on tos 2442 __ get_cache_entry_pointer_at_bcp(G1_scratch, G3_scratch, 1); 2443 // Save tos values before call_VM() clobbers them. Since we have 2444 // to do it for every data type, we use the saved values as the 2445 // jvalue object. 2446 switch (bytecode()) { // save tos values before call_VM() clobbers them 2447 case Bytecodes::_fast_aputfield: __ push_ptr(Otos_i); break; 2448 case Bytecodes::_fast_bputfield: // fall through 2449 case Bytecodes::_fast_sputfield: // fall through 2450 case Bytecodes::_fast_cputfield: // fall through 2451 case Bytecodes::_fast_iputfield: __ push_i(Otos_i); break; 2452 case Bytecodes::_fast_dputfield: __ push_d(Ftos_d); break; 2453 case Bytecodes::_fast_fputfield: __ push_f(Ftos_f); break; 2454 // get words in right order for use as jvalue object 2455 case Bytecodes::_fast_lputfield: __ push_l(Otos_l); break; 2456 } 2457 // setup pointer to jvalue object 2458 __ mov(Lesp, G3_scratch); __ inc(G3_scratch, wordSize); 2459 // G4_scratch: object pointer 2460 // G1_scratch: cache entry pointer 2461 // G3_scratch: jvalue object on the stack 2462 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_field_modification), G4_scratch, G1_scratch, G3_scratch); 2463 switch (bytecode()) { // restore tos values 2464 case Bytecodes::_fast_aputfield: __ pop_ptr(Otos_i); break; 2465 case Bytecodes::_fast_bputfield: // fall through 2466 case Bytecodes::_fast_sputfield: // fall through 2467 case Bytecodes::_fast_cputfield: // fall through 2468 case Bytecodes::_fast_iputfield: __ pop_i(Otos_i); break; 2469 case Bytecodes::_fast_dputfield: __ pop_d(Ftos_d); break; 2470 case Bytecodes::_fast_fputfield: __ pop_f(Ftos_f); break; 2471 case Bytecodes::_fast_lputfield: __ pop_l(Otos_l); break; 2472 } 2473 __ bind(done); 2474 } 2475 } 2476 2477 // The registers Rcache and index are expected to be set before the call. 2478 // The function may destroy various registers, just not the Rcache and index registers.
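// (Added sketch, not in the original source.) For the non-static case the
// function below has to decode this expression-stack picture, where slot 0
// is the top of the stack and Interpreter::expr_offset_in_bytes(i)
// addresses slot i:
//
//   one-word value:                 two-word value (long/double):
//     slot 0 : value                  slots 0..1 : value
//     slot 1 : object ref             slot 2     : object ref
//
// Hence the ltos/dtos test, which advances the tentative object pointer by
// one or two expression slots before dereferencing it.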
2479 void TemplateTable::jvmti_post_field_mod(Register Rcache, Register index, bool is_static) { 2480 ByteSize cp_base_offset = ConstantPoolCache::base_offset(); 2481 2482 if (JvmtiExport::can_post_field_modification()) { 2483 // Check to see if a field modification watch has been set before we take 2484 // the time to call into the VM. 2485 Label Label1; 2486 assert_different_registers(Rcache, index, G1_scratch); 2487 AddressLiteral get_field_modification_count_addr(JvmtiExport::get_field_modification_count_addr()); 2488 __ load_contents(get_field_modification_count_addr, G1_scratch); 2489 __ cmp_and_br_short(G1_scratch, 0, Assembler::zero, Assembler::pt, Label1); 2490 2491 // The Rcache and index registers have already been set. That would 2492 // allow us to eliminate this call, but then the Rcache and index 2493 // registers would have to be used correspondingly after this line. 2494 __ get_cache_and_index_at_bcp(G1_scratch, G4_scratch, 1); 2495 2496 __ add(G1_scratch, in_bytes(cp_base_offset), G3_scratch); 2497 if (is_static) { 2498 // Life is simple. Null out the object pointer. 2499 __ clr(G4_scratch); 2500 } else { 2501 Register Rflags = G1_scratch; 2502 // Life is harder. The stack holds the value on top, followed by the 2503 // object. We don't know the size of the value, though; it could be 2504 // one or two words depending on its type. As a result, we must find 2505 // the type to determine where the object is. 2506 2507 Label two_word, valsizeknown; 2508 __ ld_ptr(G1_scratch, cp_base_offset + ConstantPoolCacheEntry::flags_offset(), Rflags); 2509 __ mov(Lesp, G4_scratch); 2510 __ srl(Rflags, ConstantPoolCacheEntry::tos_state_shift, Rflags); 2511 // Make sure we don't need to mask Rflags after the above shift 2512 ConstantPoolCacheEntry::verify_tos_state_shift(); 2513 __ cmp(Rflags, ltos); 2514 __ br(Assembler::equal, false, Assembler::pt, two_word); 2515 __ delayed()->cmp(Rflags, dtos); 2516 __ br(Assembler::equal, false, Assembler::pt, two_word); 2517 __ delayed()->nop(); 2518 __ inc(G4_scratch, Interpreter::expr_offset_in_bytes(1)); 2519 __ ba_short(valsizeknown); 2520 __ bind(two_word); 2521 2522 __ inc(G4_scratch, Interpreter::expr_offset_in_bytes(2)); 2523 2524 __ bind(valsizeknown); 2525 // setup object pointer 2526 __ ld_ptr(G4_scratch, 0, G4_scratch); 2527 __ verify_oop(G4_scratch); 2528 } 2529 // setup pointer to jvalue object 2530 __ mov(Lesp, G1_scratch); __ inc(G1_scratch, wordSize); 2531 // G4_scratch: object pointer or NULL if static 2532 // G3_scratch: cache entry pointer 2533 // G1_scratch: jvalue object on the stack 2534 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_field_modification), 2535 G4_scratch, G3_scratch, G1_scratch); 2536 __ get_cache_and_index_at_bcp(Rcache, index, 1); 2537 __ bind(Label1); 2538 } 2539 } 2540 2541 void TemplateTable::pop_and_check_object(Register r) { 2542 __ pop_ptr(r); 2543 __ null_check(r); // for field access must check obj.
2544 __ verify_oop(r); 2545 } 2546 2547 void TemplateTable::putfield_or_static(int byte_no, bool is_static) { 2548 transition(vtos, vtos); 2549 Register Rcache = G3_scratch; 2550 Register index = G4_scratch; 2551 Register Rclass = Rcache; 2552 Register Roffset= G4_scratch; 2553 Register Rflags = G1_scratch; 2554 ByteSize cp_base_offset = ConstantPoolCache::base_offset(); 2555 2556 resolve_cache_and_index(byte_no, Rcache, index, sizeof(u2)); 2557 jvmti_post_field_mod(Rcache, index, is_static); 2558 load_field_cp_cache_entry(Rclass, Rcache, index, Roffset, Rflags, is_static); 2559 2560 Assembler::Membar_mask_bits read_bits = 2561 Assembler::Membar_mask_bits(Assembler::LoadStore | Assembler::StoreStore); 2562 Assembler::Membar_mask_bits write_bits = Assembler::StoreLoad; 2563 2564 Label notVolatile, checkVolatile, exit; 2565 if (__ membar_has_effect(read_bits) || __ membar_has_effect(write_bits)) { 2566 __ set((1 << ConstantPoolCacheEntry::is_volatile_shift), Lscratch); 2567 __ and3(Rflags, Lscratch, Lscratch); 2568 2569 if (__ membar_has_effect(read_bits)) { 2570 __ cmp_and_br_short(Lscratch, 0, Assembler::equal, Assembler::pt, notVolatile); 2571 volatile_barrier(read_bits); 2572 __ bind(notVolatile); 2573 } 2574 } 2575 2576 __ srl(Rflags, ConstantPoolCacheEntry::tos_state_shift, Rflags); 2577 // Make sure we don't need to mask Rflags after the above shift 2578 ConstantPoolCacheEntry::verify_tos_state_shift(); 2579 2580 // compute field type 2581 Label notInt, notShort, notChar, notObj, notByte, notLong, notFloat; 2582 2583 if (is_static) { 2584 // putstatic with object type most likely, check that first 2585 __ cmp(Rflags, atos); 2586 __ br(Assembler::notEqual, false, Assembler::pt, notObj); 2587 __ delayed()->cmp(Rflags, itos); 2588 2589 // atos 2590 { 2591 __ pop_ptr(); 2592 __ verify_oop(Otos_i); 2593 do_oop_store(_masm, Rclass, Roffset, 0, Otos_i, G1_scratch, _bs->kind(), false); 2594 __ ba(checkVolatile); 2595 __ delayed()->tst(Lscratch); 2596 } 2597 2598 __ bind(notObj); 2599 // cmp(Rflags, itos); 2600 __ br(Assembler::notEqual, false, Assembler::pt, notInt); 2601 __ delayed()->cmp(Rflags, btos); 2602 2603 // itos 2604 { 2605 __ pop_i(); 2606 __ st(Otos_i, Rclass, Roffset); 2607 __ ba(checkVolatile); 2608 __ delayed()->tst(Lscratch); 2609 } 2610 2611 __ bind(notInt); 2612 } else { 2613 // putfield with int type most likely, check that first 2614 __ cmp(Rflags, itos); 2615 __ br(Assembler::notEqual, false, Assembler::pt, notInt); 2616 __ delayed()->cmp(Rflags, atos); 2617 2618 // itos 2619 { 2620 __ pop_i(); 2621 pop_and_check_object(Rclass); 2622 __ st(Otos_i, Rclass, Roffset); 2623 patch_bytecode(Bytecodes::_fast_iputfield, G3_scratch, G4_scratch, true, byte_no); 2624 __ ba(checkVolatile); 2625 __ delayed()->tst(Lscratch); 2626 } 2627 2628 __ bind(notInt); 2629 // cmp(Rflags, atos); 2630 __ br(Assembler::notEqual, false, Assembler::pt, notObj); 2631 __ delayed()->cmp(Rflags, btos); 2632 2633 // atos 2634 { 2635 __ pop_ptr(); 2636 pop_and_check_object(Rclass); 2637 __ verify_oop(Otos_i); 2638 do_oop_store(_masm, Rclass, Roffset, 0, Otos_i, G1_scratch, _bs->kind(), false); 2639 patch_bytecode(Bytecodes::_fast_aputfield, G3_scratch, G4_scratch, true, byte_no); 2640 __ ba(checkVolatile); 2641 __ delayed()->tst(Lscratch); 2642 } 2643 2644 __ bind(notObj); 2645 } 2646 2647 // cmp(Rflags, btos); 2648 __ br(Assembler::notEqual, false, Assembler::pt, notByte); 2649 __ delayed()->cmp(Rflags, ltos); 2650 2651 // btos 2652 { 2653 __ pop_i(); 2654 if (!is_static) pop_and_check_object(Rclass); 
2655 __ stb(Otos_i, Rclass, Roffset); 2656 if (!is_static) { 2657 patch_bytecode(Bytecodes::_fast_bputfield, G3_scratch, G4_scratch, true, byte_no); 2658 } 2659 __ ba(checkVolatile); 2660 __ delayed()->tst(Lscratch); 2661 } 2662 2663 __ bind(notByte); 2664 // cmp(Rflags, ltos); 2665 __ br(Assembler::notEqual, false, Assembler::pt, notLong); 2666 __ delayed()->cmp(Rflags, ctos); 2667 2668 // ltos 2669 { 2670 __ pop_l(); 2671 if (!is_static) pop_and_check_object(Rclass); 2672 __ st_long(Otos_l, Rclass, Roffset); 2673 if (!is_static) { 2674 patch_bytecode(Bytecodes::_fast_lputfield, G3_scratch, G4_scratch, true, byte_no); 2675 } 2676 __ ba(checkVolatile); 2677 __ delayed()->tst(Lscratch); 2678 } 2679 2680 __ bind(notLong); 2681 // cmp(Rflags, ctos); 2682 __ br(Assembler::notEqual, false, Assembler::pt, notChar); 2683 __ delayed()->cmp(Rflags, stos); 2684 2685 // ctos (char) 2686 { 2687 __ pop_i(); 2688 if (!is_static) pop_and_check_object(Rclass); 2689 __ sth(Otos_i, Rclass, Roffset); 2690 if (!is_static) { 2691 patch_bytecode(Bytecodes::_fast_cputfield, G3_scratch, G4_scratch, true, byte_no); 2692 } 2693 __ ba(checkVolatile); 2694 __ delayed()->tst(Lscratch); 2695 } 2696 2697 __ bind(notChar); 2698 // cmp(Rflags, stos); 2699 __ br(Assembler::notEqual, false, Assembler::pt, notShort); 2700 __ delayed()->cmp(Rflags, ftos); 2701 2702 // stos (short) 2703 { 2704 __ pop_i(); 2705 if (!is_static) pop_and_check_object(Rclass); 2706 __ sth(Otos_i, Rclass, Roffset); 2707 if (!is_static) { 2708 patch_bytecode(Bytecodes::_fast_sputfield, G3_scratch, G4_scratch, true, byte_no); 2709 } 2710 __ ba(checkVolatile); 2711 __ delayed()->tst(Lscratch); 2712 } 2713 2714 __ bind(notShort); 2715 // cmp(Rflags, ftos); 2716 __ br(Assembler::notZero, false, Assembler::pt, notFloat); 2717 __ delayed()->nop(); 2718 2719 // ftos 2720 { 2721 __ pop_f(); 2722 if (!is_static) pop_and_check_object(Rclass); 2723 __ stf(FloatRegisterImpl::S, Ftos_f, Rclass, Roffset); 2724 if (!is_static) { 2725 patch_bytecode(Bytecodes::_fast_fputfield, G3_scratch, G4_scratch, true, byte_no); 2726 } 2727 __ ba(checkVolatile); 2728 __ delayed()->tst(Lscratch); 2729 } 2730 2731 __ bind(notFloat); 2732 2733 // dtos 2734 { 2735 __ pop_d(); 2736 if (!is_static) pop_and_check_object(Rclass); 2737 __ stf(FloatRegisterImpl::D, Ftos_d, Rclass, Roffset); 2738 if (!is_static) { 2739 patch_bytecode(Bytecodes::_fast_dputfield, G3_scratch, G4_scratch, true, byte_no); 2740 } 2741 } 2742 2743 __ bind(checkVolatile); 2744 __ tst(Lscratch); 2745 2746 if (__ membar_has_effect(write_bits)) { 2747 // __ tst(Lscratch); in delay slot 2748 __ br(Assembler::zero, false, Assembler::pt, exit); 2749 __ delayed()->nop(); 2750 volatile_barrier(Assembler::StoreLoad); 2751 __ bind(exit); 2752 } 2753 } 2754 2755 void TemplateTable::fast_storefield(TosState state) { 2756 transition(state, vtos); 2757 Register Rcache = G3_scratch; 2758 Register Rclass = Rcache; 2759 Register Roffset= G4_scratch; 2760 Register Rflags = G1_scratch; 2761 ByteSize cp_base_offset = ConstantPoolCache::base_offset(); 2762 2763 jvmti_post_fast_field_mod(); 2764 2765 __ get_cache_and_index_at_bcp(Rcache, G4_scratch, 1); 2766 2767 Assembler::Membar_mask_bits read_bits = 2768 Assembler::Membar_mask_bits(Assembler::LoadStore | Assembler::StoreStore); 2769 Assembler::Membar_mask_bits write_bits = Assembler::StoreLoad; 2770 2771 Label notVolatile, checkVolatile, exit; 2772 if (__ membar_has_effect(read_bits) || __ membar_has_effect(write_bits)) { 2773 __ ld_ptr(Rcache, cp_base_offset + 
ConstantPoolCacheEntry::flags_offset(), Rflags); 2774 __ set((1 << ConstantPoolCacheEntry::is_volatile_shift), Lscratch); 2775 __ and3(Rflags, Lscratch, Lscratch); 2776 if (__ membar_has_effect(read_bits)) { 2777 __ cmp_and_br_short(Lscratch, 0, Assembler::equal, Assembler::pt, notVolatile); 2778 volatile_barrier(read_bits); 2779 __ bind(notVolatile); 2780 } 2781 } 2782 2783 __ ld_ptr(Rcache, cp_base_offset + ConstantPoolCacheEntry::f2_offset(), Roffset); 2784 pop_and_check_object(Rclass); 2785 2786 switch (bytecode()) { 2787 case Bytecodes::_fast_bputfield: __ stb(Otos_i, Rclass, Roffset); break; 2788 case Bytecodes::_fast_cputfield: /* fall through */ 2789 case Bytecodes::_fast_sputfield: __ sth(Otos_i, Rclass, Roffset); break; 2790 case Bytecodes::_fast_iputfield: __ st(Otos_i, Rclass, Roffset); break; 2791 case Bytecodes::_fast_lputfield: __ st_long(Otos_l, Rclass, Roffset); break; 2792 case Bytecodes::_fast_fputfield: 2793 __ stf(FloatRegisterImpl::S, Ftos_f, Rclass, Roffset); 2794 break; 2795 case Bytecodes::_fast_dputfield: 2796 __ stf(FloatRegisterImpl::D, Ftos_d, Rclass, Roffset); 2797 break; 2798 case Bytecodes::_fast_aputfield: 2799 do_oop_store(_masm, Rclass, Roffset, 0, Otos_i, G1_scratch, _bs->kind(), false); 2800 break; 2801 default: 2802 ShouldNotReachHere(); 2803 } 2804 2805 if (__ membar_has_effect(write_bits)) { 2806 __ cmp_and_br_short(Lscratch, 0, Assembler::equal, Assembler::pt, exit); 2807 volatile_barrier(Assembler::StoreLoad); 2808 __ bind(exit); 2809 } 2810 } 2811 2812 2813 void TemplateTable::putfield(int byte_no) { 2814 putfield_or_static(byte_no, false); 2815 } 2816 2817 void TemplateTable::putstatic(int byte_no) { 2818 putfield_or_static(byte_no, true); 2819 } 2820 2821 2822 void TemplateTable::fast_xaccess(TosState state) { 2823 transition(vtos, state); 2824 Register Rcache = G3_scratch; 2825 Register Roffset = G4_scratch; 2826 Register Rflags = G4_scratch; 2827 Register Rreceiver = Lscratch; 2828 2829 __ ld_ptr(Llocals, 0, Rreceiver); 2830 2831 // access constant pool cache (is resolved) 2832 __ get_cache_and_index_at_bcp(Rcache, G4_scratch, 2); 2833 __ ld_ptr(Rcache, ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::f2_offset(), Roffset); 2834 __ add(Lbcp, 1, Lbcp); // needed to report exception at the correct bcp 2835 2836 __ verify_oop(Rreceiver); 2837 __ null_check(Rreceiver); 2838 if (state == atos) { 2839 __ load_heap_oop(Rreceiver, Roffset, Otos_i); 2840 } else if (state == itos) { 2841 __ ld (Rreceiver, Roffset, Otos_i) ; 2842 } else if (state == ftos) { 2843 __ ldf(FloatRegisterImpl::S, Rreceiver, Roffset, Ftos_f); 2844 } else { 2845 ShouldNotReachHere(); 2846 } 2847 2848 Assembler::Membar_mask_bits membar_bits = 2849 Assembler::Membar_mask_bits(Assembler::LoadLoad | Assembler::LoadStore); 2850 if (__ membar_has_effect(membar_bits)) { 2851 2852 // Get is_volatile value in Rflags and check if membar is needed 2853 __ ld_ptr(Rcache, ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::flags_offset(), Rflags); 2854 2855 // Test volatile 2856 Label notVolatile; 2857 __ set((1 << ConstantPoolCacheEntry::is_volatile_shift), Lscratch); 2858 __ btst(Rflags, Lscratch); 2859 __ br(Assembler::zero, false, Assembler::pt, notVolatile); 2860 __ delayed()->nop(); 2861 volatile_barrier(membar_bits); 2862 __ bind(notVolatile); 2863 } 2864 2865 __ interp_verify_oop(Otos_i, state, __FILE__, __LINE__); 2866 __ sub(Lbcp, 1, Lbcp); 2867 } 2868 2869 //---------------------------------------------------------------------------------------------------- 2870 
// Calls 2871 2872 void TemplateTable::count_calls(Register method, Register temp) { 2873 // implemented elsewhere 2874 ShouldNotReachHere(); 2875 } 2876 2877 void TemplateTable::prepare_invoke(int byte_no, 2878 Register method, // linked method (or i-klass) 2879 Register ra, // return address 2880 Register index, // itable index, MethodType, etc. 2881 Register recv, // if caller wants to see it 2882 Register flags // if caller wants to test it 2883 ) { 2884 // determine flags 2885 const Bytecodes::Code code = bytecode(); 2886 const bool is_invokeinterface = code == Bytecodes::_invokeinterface; 2887 const bool is_invokedynamic = code == Bytecodes::_invokedynamic; 2888 const bool is_invokehandle = code == Bytecodes::_invokehandle; 2889 const bool is_invokevirtual = code == Bytecodes::_invokevirtual; 2890 const bool is_invokespecial = code == Bytecodes::_invokespecial; 2891 const bool load_receiver = (recv != noreg); 2892 assert(load_receiver == (code != Bytecodes::_invokestatic && code != Bytecodes::_invokedynamic), ""); 2893 assert(recv == noreg || recv == O0, ""); 2894 assert(flags == noreg || flags == O1, ""); 2895 2896 // setup registers & access constant pool cache 2897 if (recv == noreg) recv = O0; 2898 if (flags == noreg) flags = O1; 2899 const Register temp = O2; 2900 assert_different_registers(method, ra, index, recv, flags, temp); 2901 2902 load_invoke_cp_cache_entry(byte_no, method, index, flags, is_invokevirtual, false, is_invokedynamic); 2903 2904 __ mov(SP, O5_savedSP); // record SP that we wanted the callee to restore 2905 2906 // maybe push appendix to arguments 2907 if (is_invokedynamic || is_invokehandle) { 2908 Label L_no_push; 2909 __ set((1 << ConstantPoolCacheEntry::has_appendix_shift), temp); 2910 __ btst(flags, temp); 2911 __ br(Assembler::zero, false, Assembler::pt, L_no_push); 2912 __ delayed()->nop(); 2913 // Push the appendix as a trailing parameter. 2914 // This must be done before we get the receiver, 2915 // since the parameter_size includes it. 2916 assert(ConstantPoolCacheEntry::_indy_resolved_references_appendix_offset == 0, "appendix expected at index+0"); 2917 __ load_resolved_reference_at_index(temp, index); 2918 __ verify_oop(temp); 2919 __ push_ptr(temp); // push appendix (MethodType, CallSite, etc.) 
2920 __ bind(L_no_push); 2921 } 2922 2923 // load receiver if needed (after appendix is pushed so parameter size is correct) 2924 if (load_receiver) { 2925 __ and3(flags, ConstantPoolCacheEntry::parameter_size_mask, temp); // get parameter size 2926 __ load_receiver(temp, recv); // __ argument_address uses Gargs but we need Lesp 2927 __ verify_oop(recv); 2928 } 2929 2930 // compute return type 2931 __ srl(flags, ConstantPoolCacheEntry::tos_state_shift, ra); 2932 // Make sure we don't need to mask flags after the above shift 2933 ConstantPoolCacheEntry::verify_tos_state_shift(); 2934 // load return address 2935 { 2936 const address table_addr = (address) Interpreter::invoke_return_entry_table_for(code); 2937 AddressLiteral table(table_addr); 2938 __ set(table, temp); 2939 __ sll(ra, LogBytesPerWord, ra); 2940 __ ld_ptr(Address(temp, ra), ra); 2941 } 2942 } 2943 2944 2945 void TemplateTable::generate_vtable_call(Register Rrecv, Register Rindex, Register Rret) { 2946 Register Rcall = Rindex; 2947 assert_different_registers(Rcall, G5_method, Gargs, Rret); 2948 2949 // get target Method* & entry point 2950 __ lookup_virtual_method(Rrecv, Rindex, G5_method); 2951 __ profile_arguments_type(G5_method, Rcall, Gargs, true); 2952 __ call_from_interpreter(Rcall, Gargs, Rret); 2953 } 2954 2955 void TemplateTable::invokevirtual(int byte_no) { 2956 transition(vtos, vtos); 2957 assert(byte_no == f2_byte, "use this argument"); 2958 2959 Register Rscratch = G3_scratch; 2960 Register Rtemp = G4_scratch; 2961 Register Rret = Lscratch; 2962 Register O0_recv = O0; 2963 Label notFinal; 2964 2965 load_invoke_cp_cache_entry(byte_no, G5_method, noreg, Rret, true, false, false); 2966 __ mov(SP, O5_savedSP); // record SP that we wanted the callee to restore 2967 2968 // Check for vfinal 2969 __ set((1 << ConstantPoolCacheEntry::is_vfinal_shift), G4_scratch); 2970 __ btst(Rret, G4_scratch); 2971 __ br(Assembler::zero, false, Assembler::pt, notFinal); 2972 __ delayed()->and3(Rret, 0xFF, G4_scratch); // gets number of parameters 2973 2974 patch_bytecode(Bytecodes::_fast_invokevfinal, Rscratch, Rtemp); 2975 2976 invokevfinal_helper(Rscratch, Rret); 2977 2978 __ bind(notFinal); 2979 2980 __ mov(G5_method, Rscratch); // better scratch register 2981 __ load_receiver(G4_scratch, O0_recv); // gets receiverOop 2982 // receiver is in O0_recv 2983 __ verify_oop(O0_recv); 2984 2985 // get return address 2986 AddressLiteral table(Interpreter::invoke_return_entry_table()); 2987 __ set(table, Rtemp); 2988 __ srl(Rret, ConstantPoolCacheEntry::tos_state_shift, Rret); // get return type 2989 // Make sure we don't need to mask Rret after the above shift 2990 ConstantPoolCacheEntry::verify_tos_state_shift(); 2991 __ sll(Rret, LogBytesPerWord, Rret); 2992 __ ld_ptr(Rtemp, Rret, Rret); // get return address 2993 2994 // get receiver klass 2995 __ null_check(O0_recv, oopDesc::klass_offset_in_bytes()); 2996 __ load_klass(O0_recv, O0_recv); 2997 __ verify_klass_ptr(O0_recv); 2998 2999 __ profile_virtual_call(O0_recv, O4); 3000 3001 generate_vtable_call(O0_recv, Rscratch, Rret); 3002 } 3003 3004 void TemplateTable::fast_invokevfinal(int byte_no) { 3005 transition(vtos, vtos); 3006 assert(byte_no == f2_byte, "use this argument"); 3007 3008 load_invoke_cp_cache_entry(byte_no, G5_method, noreg, Lscratch, true, 3009 /*is_invokevfinal*/true, false); 3010 __ mov(SP, O5_savedSP); // record SP that we wanted the callee to restore 3011 invokevfinal_helper(G3_scratch, Lscratch); 3012 } 3013 3014 void TemplateTable::invokevfinal_helper(Register Rscratch, 
Register Rret) { 3015 Register Rtemp = G4_scratch; 3016 3017 // Load receiver from stack slot 3018 __ ld_ptr(G5_method, in_bytes(Method::const_offset()), G4_scratch); 3019 __ lduh(G4_scratch, in_bytes(ConstMethod::size_of_parameters_offset()), G4_scratch); 3020 __ load_receiver(G4_scratch, O0); 3021 3022 // receiver NULL check 3023 __ null_check(O0); 3024 3025 __ profile_final_call(O4); 3026 __ profile_arguments_type(G5_method, Rscratch, Gargs, true); 3027 3028 // get return address 3029 AddressLiteral table(Interpreter::invoke_return_entry_table()); 3030 __ set(table, Rtemp); 3031 __ srl(Rret, ConstantPoolCacheEntry::tos_state_shift, Rret); // get return type 3032 // Make sure we don't need to mask Rret after the above shift 3033 ConstantPoolCacheEntry::verify_tos_state_shift(); 3034 __ sll(Rret, LogBytesPerWord, Rret); 3035 __ ld_ptr(Rtemp, Rret, Rret); // get return address 3036 3037 3038 // do the call 3039 __ call_from_interpreter(Rscratch, Gargs, Rret); 3040 } 3041 3042 3043 void TemplateTable::invokespecial(int byte_no) { 3044 transition(vtos, vtos); 3045 assert(byte_no == f1_byte, "use this argument"); 3046 3047 const Register Rret = Lscratch; 3048 const Register O0_recv = O0; 3049 const Register Rscratch = G3_scratch; 3050 3051 prepare_invoke(byte_no, G5_method, Rret, noreg, O0_recv); // get receiver also for null check 3052 __ null_check(O0_recv); 3053 3054 // do the call 3055 __ profile_call(O4); 3056 __ profile_arguments_type(G5_method, Rscratch, Gargs, false); 3057 __ call_from_interpreter(Rscratch, Gargs, Rret); 3058 } 3059 3060 3061 void TemplateTable::invokestatic(int byte_no) { 3062 transition(vtos, vtos); 3063 assert(byte_no == f1_byte, "use this argument"); 3064 3065 const Register Rret = Lscratch; 3066 const Register Rscratch = G3_scratch; 3067 3068 prepare_invoke(byte_no, G5_method, Rret); // get f1 Method* 3069 3070 // do the call 3071 __ profile_call(O4); 3072 __ profile_arguments_type(G5_method, Rscratch, Gargs, false); 3073 __ call_from_interpreter(Rscratch, Gargs, Rret); 3074 } 3075 3076 void TemplateTable::invokeinterface_object_method(Register RKlass, 3077 Register Rcall, 3078 Register Rret, 3079 Register Rflags) { 3080 Register Rscratch = G4_scratch; 3081 Register Rindex = Lscratch; 3082 3083 assert_different_registers(Rscratch, Rindex, Rret); 3084 3085 Label notFinal; 3086 3087 // Check for vfinal 3088 __ set((1 << ConstantPoolCacheEntry::is_vfinal_shift), Rscratch); 3089 __ btst(Rflags, Rscratch); 3090 __ br(Assembler::zero, false, Assembler::pt, notFinal); 3091 __ delayed()->nop(); 3092 3093 __ profile_final_call(O4); 3094 3095 // do the call - the index (f2) contains the Method* 3096 assert_different_registers(G5_method, Gargs, Rcall); 3097 __ mov(Rindex, G5_method); 3098 __ profile_arguments_type(G5_method, Rcall, Gargs, true); 3099 __ call_from_interpreter(Rcall, Gargs, Rret); 3100 __ bind(notFinal); 3101 3102 __ profile_virtual_call(RKlass, O4); 3103 generate_vtable_call(RKlass, Rindex, Rret); 3104 } 3105 3106 3107 void TemplateTable::invokeinterface(int byte_no) { 3108 transition(vtos, vtos); 3109 assert(byte_no == f1_byte, "use this argument"); 3110 3111 const Register Rinterface = G1_scratch; 3112 const Register Rret = G3_scratch; 3113 const Register Rindex = Lscratch; 3114 const Register O0_recv = O0; 3115 const Register O1_flags = O1; 3116 const Register O2_Klass = O2; 3117 const Register Rscratch = G4_scratch; 3118 assert_different_registers(Rscratch, G5_method); 3119 3120 prepare_invoke(byte_no, Rinterface, Rret, Rindex, O0_recv, O1_flags); 3121 
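// (Added sketch, not part of the original source.) The itable search further
// below walks this layout, which hangs off the receiver's InstanceKlass:
//
//   klass + vtable_start_offset     : vtable, vtable_length words
//   ...aligned up...                : itableOffsetEntry[] of
//                                     { interface Klass*, offset }, scanned
//                                     until an entry matches Rinterface
//                                     (a null Klass* means the interface is
//                                     not implemented -> ICCE)
//   klass + matching entry's offset : itableMethodEntry[] of { Method* },
//                                     indexed by the f2 itable index in Rindex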
3122 // get receiver klass 3123 __ null_check(O0_recv, oopDesc::klass_offset_in_bytes()); 3124 __ load_klass(O0_recv, O2_Klass); 3125 3126 // Special case of invokeinterface called for virtual method of 3127 // java.lang.Object. See cpCacheOop.cpp for details. 3128 // This code isn't produced by javac, but could be produced by 3129 // another compliant java compiler. 3130 Label notMethod; 3131 __ set((1 << ConstantPoolCacheEntry::is_forced_virtual_shift), Rscratch); 3132 __ btst(O1_flags, Rscratch); 3133 __ br(Assembler::zero, false, Assembler::pt, notMethod); 3134 __ delayed()->nop(); 3135 3136 invokeinterface_object_method(O2_Klass, Rinterface, Rret, O1_flags); 3137 3138 __ bind(notMethod); 3139 3140 __ profile_virtual_call(O2_Klass, O4); 3141 3142 // 3143 // find entry point to call 3144 // 3145 3146 // compute start of first itableOffsetEntry (which is at end of vtable) 3147 const int base = InstanceKlass::vtable_start_offset() * wordSize; 3148 Label search; 3149 Register Rtemp = O1_flags; 3150 3151 __ ld(O2_Klass, InstanceKlass::vtable_length_offset() * wordSize, Rtemp); 3152 if (align_object_offset(1) > 1) { 3153 __ round_to(Rtemp, align_object_offset(1)); 3154 } 3155 __ sll(Rtemp, LogBytesPerWord, Rtemp); // Rscratch *= 4; 3156 if (Assembler::is_simm13(base)) { 3157 __ add(Rtemp, base, Rtemp); 3158 } else { 3159 __ set(base, Rscratch); 3160 __ add(Rscratch, Rtemp, Rtemp); 3161 } 3162 __ add(O2_Klass, Rtemp, Rscratch); 3163 3164 __ bind(search); 3165 3166 __ ld_ptr(Rscratch, itableOffsetEntry::interface_offset_in_bytes(), Rtemp); 3167 { 3168 Label ok; 3169 3170 // Check that entry is non-null. Null entries are probably a bytecode 3171 // problem. If the interface isn't implemented by the receiver class, 3172 // the VM should throw IncompatibleClassChangeError. linkResolver checks 3173 // this too but that's only if the entry isn't already resolved, so we 3174 // need to check again. 3175 __ br_notnull_short( Rtemp, Assembler::pt, ok); 3176 call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_IncompatibleClassChangeError)); 3177 __ should_not_reach_here(); 3178 __ bind(ok); 3179 } 3180 3181 __ cmp(Rinterface, Rtemp); 3182 __ brx(Assembler::notEqual, true, Assembler::pn, search); 3183 __ delayed()->add(Rscratch, itableOffsetEntry::size() * wordSize, Rscratch); 3184 3185 // entry found and Rscratch points to it 3186 __ ld(Rscratch, itableOffsetEntry::offset_offset_in_bytes(), Rscratch); 3187 3188 assert(itableMethodEntry::method_offset_in_bytes() == 0, "adjust instruction below"); 3189 __ sll(Rindex, exact_log2(itableMethodEntry::size() * wordSize), Rindex); // Rindex *= 8; 3190 __ add(Rscratch, Rindex, Rscratch); 3191 __ ld_ptr(O2_Klass, Rscratch, G5_method); 3192 3193 // Check for abstract method error. 
3194 { 3195 Label ok; 3196 __ br_notnull_short(G5_method, Assembler::pt, ok); 3197 call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_AbstractMethodError)); 3198 __ should_not_reach_here(); 3199 __ bind(ok); 3200 } 3201 3202 Register Rcall = Rinterface; 3203 assert_different_registers(Rcall, G5_method, Gargs, Rret); 3204 3205 __ profile_arguments_type(G5_method, Rcall, Gargs, true); 3206 __ call_from_interpreter(Rcall, Gargs, Rret); 3207 } 3208 3209 void TemplateTable::invokehandle(int byte_no) { 3210 transition(vtos, vtos); 3211 assert(byte_no == f1_byte, "use this argument"); 3212 3213 const Register Rret = Lscratch; 3214 const Register G4_mtype = G4_scratch; 3215 const Register O0_recv = O0; 3216 const Register Rscratch = G3_scratch; 3217 3218 prepare_invoke(byte_no, G5_method, Rret, G4_mtype, O0_recv); 3219 __ null_check(O0_recv); 3220 3221 // G4: MethodType object (from cpool->resolved_references[f1], if necessary) 3222 // G5: MH.invokeExact_MT method (from f2) 3223 3224 // Note: G4_mtype is already pushed (if necessary) by prepare_invoke 3225 3226 // do the call 3227 __ verify_oop(G4_mtype); 3228 __ profile_final_call(O4); // FIXME: profile the LambdaForm also 3229 __ profile_arguments_type(G5_method, Rscratch, Gargs, true); 3230 __ call_from_interpreter(Rscratch, Gargs, Rret); 3231 } 3232 3233 3234 void TemplateTable::invokedynamic(int byte_no) { 3235 transition(vtos, vtos); 3236 assert(byte_no == f1_byte, "use this argument"); 3237 3238 const Register Rret = Lscratch; 3239 const Register G4_callsite = G4_scratch; 3240 const Register Rscratch = G3_scratch; 3241 3242 prepare_invoke(byte_no, G5_method, Rret, G4_callsite); 3243 3244 // G4: CallSite object (from cpool->resolved_references[f1]) 3245 // G5: MH.linkToCallSite method (from f2) 3246 3247 // Note: G4_callsite is already pushed by prepare_invoke 3248 3249 // %%% should make a type profile for any invokedynamic that takes a ref argument 3250 // profile this call 3251 __ profile_call(O4); 3252 3253 // do the call 3254 __ verify_oop(G4_callsite); 3255 __ profile_arguments_type(G5_method, Rscratch, Gargs, false); 3256 __ call_from_interpreter(Rscratch, Gargs, Rret); 3257 } 3258 3259 3260 //---------------------------------------------------------------------------------------------------- 3261 // Allocation 3262 3263 void TemplateTable::_new() { 3264 transition(vtos, atos); 3265 3266 Label slow_case; 3267 Label done; 3268 Label initialize_header; 3269 Label initialize_object; // including clearing the fields 3270 3271 Register RallocatedObject = Otos_i; 3272 Register RinstanceKlass = O1; 3273 Register Roffset = O3; 3274 Register Rscratch = O4; 3275 3276 __ get_2_byte_integer_at_bcp(1, Rscratch, Roffset, InterpreterMacroAssembler::Unsigned); 3277 __ get_cpool_and_tags(Rscratch, G3_scratch); 3278 // make sure the class we're about to instantiate has been resolved 3279 // This is done before loading InstanceKlass to be consistent with the order 3280 // how Constant Pool is updated (see ConstantPool::klass_at_put) 3281 __ add(G3_scratch, Array<u1>::base_offset_in_bytes(), G3_scratch); 3282 __ ldub(G3_scratch, Roffset, G3_scratch); 3283 __ cmp(G3_scratch, JVM_CONSTANT_Class); 3284 __ br(Assembler::notEqual, false, Assembler::pn, slow_case); 3285 __ delayed()->sll(Roffset, LogBytesPerWord, Roffset); 3286 // get InstanceKlass 3287 //__ sll(Roffset, LogBytesPerWord, Roffset); // executed in delay slot 3288 __ add(Roffset, sizeof(ConstantPool), Roffset); 3289 __ ld_ptr(Rscratch, Roffset, RinstanceKlass); 3290 3291 // make 
sure klass is fully initialized: 3292 __ ldub(RinstanceKlass, in_bytes(InstanceKlass::init_state_offset()), G3_scratch); 3293 __ cmp(G3_scratch, InstanceKlass::fully_initialized); 3294 __ br(Assembler::notEqual, false, Assembler::pn, slow_case); 3295 __ delayed()->ld(RinstanceKlass, in_bytes(Klass::layout_helper_offset()), Roffset); 3296 3297 // get instance_size in InstanceKlass (already aligned) 3298 //__ ld(RinstanceKlass, in_bytes(Klass::layout_helper_offset()), Roffset); 3299 3300 // make sure klass does not have a finalizer, and is not abstract, an interface, or java/lang/Class 3301 __ btst(Klass::_lh_instance_slow_path_bit, Roffset); 3302 __ br(Assembler::notZero, false, Assembler::pn, slow_case); 3303 __ delayed()->nop(); 3304 3305 // allocate the instance 3306 // 1) Try to allocate in the TLAB 3307 // 2) if fail, and the TLAB is not full enough to discard, allocate in the shared Eden 3308 // 3) if the above fails (or is not applicable), go to a slow case 3309 // (creates a new TLAB, etc.) 3310 3311 const bool allow_shared_alloc = 3312 Universe::heap()->supports_inline_contig_alloc(); 3313 3314 if (UseTLAB) { 3315 Register RoldTopValue = RallocatedObject; 3316 Register RtlabWasteLimitValue = G3_scratch; 3317 Register RnewTopValue = G1_scratch; 3318 Register RendValue = Rscratch; 3319 Register RfreeValue = RnewTopValue; 3320 3321 // check if we can allocate in the TLAB 3322 __ ld_ptr(G2_thread, in_bytes(JavaThread::tlab_top_offset()), RoldTopValue); // sets up RallocatedObject 3323 __ ld_ptr(G2_thread, in_bytes(JavaThread::tlab_end_offset()), RendValue); 3324 __ add(RoldTopValue, Roffset, RnewTopValue); 3325 3326 // if there is enough space, we do not CAS and do not clear 3327 __ cmp(RnewTopValue, RendValue); 3328 if (ZeroTLAB) { 3329 // the fields have already been cleared 3330 __ brx(Assembler::lessEqualUnsigned, true, Assembler::pt, initialize_header); 3331 } else { 3332 // initialize both the header and fields 3333 __ brx(Assembler::lessEqualUnsigned, true, Assembler::pt, initialize_object); 3334 } 3335 __ delayed()->st_ptr(RnewTopValue, G2_thread, in_bytes(JavaThread::tlab_top_offset())); 3336 3337 if (allow_shared_alloc) { 3338 // Check if tlab should be discarded (refill_waste_limit >= free) 3339 __ ld_ptr(G2_thread, in_bytes(JavaThread::tlab_refill_waste_limit_offset()), RtlabWasteLimitValue); 3340 __ sub(RendValue, RoldTopValue, RfreeValue); 3341 #ifdef _LP64 3342 __ srlx(RfreeValue, LogHeapWordSize, RfreeValue); 3343 #else 3344 __ srl(RfreeValue, LogHeapWordSize, RfreeValue); 3345 #endif 3346 __ cmp_and_brx_short(RtlabWasteLimitValue, RfreeValue, Assembler::greaterEqualUnsigned, Assembler::pt, slow_case); // tlab waste is small 3347 3348 // increment waste limit to prevent getting stuck on this slow path 3349 __ add(RtlabWasteLimitValue, ThreadLocalAllocBuffer::refill_waste_limit_increment(), RtlabWasteLimitValue); 3350 __ st_ptr(RtlabWasteLimitValue, G2_thread, in_bytes(JavaThread::tlab_refill_waste_limit_offset())); 3351 } else { 3352 // No allocation in the shared eden.
3353 __ ba_short(slow_case); 3354 } 3355 } 3356 3357 // Allocation in the shared Eden 3358 if (allow_shared_alloc) { 3359 Register RoldTopValue = G1_scratch; 3360 Register RtopAddr = G3_scratch; 3361 Register RnewTopValue = RallocatedObject; 3362 Register RendValue = Rscratch; 3363 3364 __ set((intptr_t)Universe::heap()->top_addr(), RtopAddr); 3365 3366 Label retry; 3367 __ bind(retry); 3368 __ set((intptr_t)Universe::heap()->end_addr(), RendValue); 3369 __ ld_ptr(RendValue, 0, RendValue); 3370 __ ld_ptr(RtopAddr, 0, RoldTopValue); 3371 __ add(RoldTopValue, Roffset, RnewTopValue); 3372 3373 // RnewTopValue contains the top address after the new object 3374 // has been allocated. 3375 __ cmp_and_brx_short(RnewTopValue, RendValue, Assembler::greaterUnsigned, Assembler::pn, slow_case); 3376 3377 __ cas_ptr(RtopAddr, RoldTopValue, RnewTopValue); 3378 3379 // if someone beat us on the allocation, try again, otherwise continue 3380 __ cmp_and_brx_short(RoldTopValue, RnewTopValue, Assembler::notEqual, Assembler::pn, retry); 3381 3382 // bump total bytes allocated by this thread 3383 // RoldTopValue and RtopAddr are dead, so can use G1 and G3 3384 __ incr_allocated_bytes(Roffset, G1_scratch, G3_scratch); 3385 } 3386 3387 if (UseTLAB || Universe::heap()->supports_inline_contig_alloc()) { 3388 // clear object fields 3389 __ bind(initialize_object); 3390 __ deccc(Roffset, sizeof(oopDesc)); 3391 __ br(Assembler::zero, false, Assembler::pt, initialize_header); 3392 __ delayed()->add(RallocatedObject, sizeof(oopDesc), G3_scratch); 3393 3394 // initialize remaining object fields 3395 if (UseBlockZeroing) { 3396 // Use BIS for zeroing 3397 __ bis_zeroing(G3_scratch, Roffset, G1_scratch, initialize_header); 3398 } else { 3399 Label loop; 3400 __ subcc(Roffset, wordSize, Roffset); 3401 __ bind(loop); 3402 //__ subcc(Roffset, wordSize, Roffset); // executed above loop or in delay slot 3403 __ st_ptr(G0, G3_scratch, Roffset); 3404 __ br(Assembler::notEqual, false, Assembler::pt, loop); 3405 __ delayed()->subcc(Roffset, wordSize, Roffset); 3406 } 3407 __ ba_short(initialize_header); 3408 } 3409 3410 // slow case 3411 __ bind(slow_case); 3412 __ get_2_byte_integer_at_bcp(1, G3_scratch, O2, InterpreterMacroAssembler::Unsigned); 3413 __ get_constant_pool(O1); 3414 3415 call_VM(Otos_i, CAST_FROM_FN_PTR(address, InterpreterRuntime::_new), O1, O2); 3416 3417 __ ba_short(done); 3418 3419 // Initialize the header: mark, klass 3420 __ bind(initialize_header); 3421 3422 if (UseBiasedLocking) { 3423 __ ld_ptr(RinstanceKlass, in_bytes(Klass::prototype_header_offset()), G4_scratch); 3424 } else { 3425 __ set((intptr_t)markOopDesc::prototype(), G4_scratch); 3426 } 3427 __ st_ptr(G4_scratch, RallocatedObject, oopDesc::mark_offset_in_bytes()); // mark 3428 __ store_klass_gap(G0, RallocatedObject); // klass gap if compressed 3429 __ store_klass(RinstanceKlass, RallocatedObject); // klass (last for cms) 3430 3431 { 3432 SkipIfEqual skip_if( 3433 _masm, G4_scratch, &DTraceAllocProbes, Assembler::zero); 3434 // Trigger dtrace event 3435 __ push(atos); 3436 __ call_VM_leaf(noreg, 3437 CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_object_alloc), O0); 3438 __ pop(atos); 3439 } 3440 3441 // continue 3442 __ bind(done); 3443 } 3444 3445 3446 3447 void TemplateTable::newarray() { 3448 transition(itos, atos); 3449 __ ldub(Lbcp, 1, O1); 3450 call_VM(Otos_i, CAST_FROM_FN_PTR(address, InterpreterRuntime::newarray), O1, Otos_i); 3451 } 3452 3453 3454 void TemplateTable::anewarray() { 3455 transition(itos, atos); 3456 __ 
void TemplateTable::anewarray() {
  transition(itos, atos);
  __ get_constant_pool(O1);
  __ get_2_byte_integer_at_bcp(1, G4_scratch, O2, InterpreterMacroAssembler::Unsigned);
  call_VM(Otos_i, CAST_FROM_FN_PTR(address, InterpreterRuntime::anewarray), O1, O2, Otos_i);
}


void TemplateTable::arraylength() {
  transition(atos, itos);
  Label ok;
  __ verify_oop(Otos_i);
  __ tst(Otos_i);
  __ throw_if_not_1_x( Assembler::notZero, ok );
  __ delayed()->ld(Otos_i, arrayOopDesc::length_offset_in_bytes(), Otos_i);
  __ throw_if_not_2( Interpreter::_throw_NullPointerException_entry, G3_scratch, ok);
}


void TemplateTable::checkcast() {
  transition(atos, atos);
  Label done, is_null, quicked, cast_ok, resolved;
  Register Roffset = G1_scratch;
  Register RobjKlass = O5;
  Register RspecifiedKlass = O4;

  // Check for casting a NULL
  __ br_null_short(Otos_i, Assembler::pn, is_null);

  // Get value klass in RobjKlass
  __ load_klass(Otos_i, RobjKlass); // get value klass

  // Get constant pool tag
  __ get_2_byte_integer_at_bcp(1, Lscratch, Roffset, InterpreterMacroAssembler::Unsigned);

  // See if the checkcast has been quickened
  __ get_cpool_and_tags(Lscratch, G3_scratch);
  __ add(G3_scratch, Array<u1>::base_offset_in_bytes(), G3_scratch);
  __ ldub(G3_scratch, Roffset, G3_scratch);
  __ cmp(G3_scratch, JVM_CONSTANT_Class);
  __ br(Assembler::equal, true, Assembler::pt, quicked);
  __ delayed()->sll(Roffset, LogBytesPerWord, Roffset);

  __ push_ptr(); // save receiver for result, and for GC
  call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::quicken_io_cc) );
  __ get_vm_result_2(RspecifiedKlass);
  __ pop_ptr(Otos_i, G3_scratch); // restore receiver

  __ ba_short(resolved);

  // Extract target class from constant pool
  __ bind(quicked);
  __ add(Roffset, sizeof(ConstantPool), Roffset);
  __ ld_ptr(Lscratch, Roffset, RspecifiedKlass);
  __ bind(resolved);
  __ load_klass(Otos_i, RobjKlass); // get value klass

  // Generate a fast subtype check.  Branch to cast_ok if no
  // failure.  Throw exception if failure.
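  // Note: gen_subtype_check does the quick tests inline (exact klass match
  // and the cached super type check) and falls back to a scan of the
  // secondary supers; only if all of those fail does control fall through
  // to the throw below.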
  __ gen_subtype_check( RobjKlass, RspecifiedKlass, G3_scratch, G4_scratch, G1_scratch, cast_ok );

  // Not a subtype; so must throw exception
  __ throw_if_not_x( Assembler::never, Interpreter::_throw_ClassCastException_entry, G3_scratch );

  __ bind(cast_ok);

  if (ProfileInterpreter) {
    __ ba_short(done);
  }
  __ bind(is_null);
  __ profile_null_seen(G3_scratch);
  __ bind(done);
}


void TemplateTable::instanceof() {
  Label done, is_null, quicked, resolved;
  transition(atos, itos);
  Register Roffset = G1_scratch;
  Register RobjKlass = O5;
  Register RspecifiedKlass = O4;

  // Check for a NULL receiver
  __ br_null_short(Otos_i, Assembler::pt, is_null);

  // Get value klass in RobjKlass
  __ load_klass(Otos_i, RobjKlass); // get value klass

  // Get constant pool tag
  __ get_2_byte_integer_at_bcp(1, Lscratch, Roffset, InterpreterMacroAssembler::Unsigned);

  // See if the instanceof has been quickened
  __ get_cpool_and_tags(Lscratch, G3_scratch);
  __ add(G3_scratch, Array<u1>::base_offset_in_bytes(), G3_scratch);
  __ ldub(G3_scratch, Roffset, G3_scratch);
  __ cmp(G3_scratch, JVM_CONSTANT_Class);
  __ br(Assembler::equal, true, Assembler::pt, quicked);
  __ delayed()->sll(Roffset, LogBytesPerWord, Roffset);

  __ push_ptr(); // save receiver for result, and for GC
  call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::quicken_io_cc) );
  __ get_vm_result_2(RspecifiedKlass);
  __ pop_ptr(Otos_i, G3_scratch); // restore receiver

  __ ba_short(resolved);

  // Extract target class from constant pool
  __ bind(quicked);
  __ add(Roffset, sizeof(ConstantPool), Roffset);
  __ get_constant_pool(Lscratch);
  __ ld_ptr(Lscratch, Roffset, RspecifiedKlass);
  __ bind(resolved);
  __ load_klass(Otos_i, RobjKlass); // get value klass

  // Generate a fast subtype check.  Branch to done (with Otos_i == 1) if
  // the receiver is a subtype; otherwise fall through and return 0.
  __ or3(G0, 1, Otos_i); // set result assuming quick tests succeed
  __ gen_subtype_check( RobjKlass, RspecifiedKlass, G3_scratch, G4_scratch, G1_scratch, done );
  // Not a subtype; return 0
  __ clr( Otos_i );

  if (ProfileInterpreter) {
    __ ba_short(done);
  }
  __ bind(is_null);
  __ profile_null_seen(G3_scratch);
  __ bind(done);
}


void TemplateTable::_breakpoint() {

  // Note: We get here even when single stepping; jbug insists on setting
  // breakpoints at every bytecode, even in single-step mode.
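
  // The sequence below fetches the original (unpatched) bytecode from the
  // runtime, posts the breakpoint event, and then dispatches the saved
  // bytecode as if no breakpoint had been planted.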

  transition(vtos, vtos);
  // get the unpatched byte code
  __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::get_original_bytecode_at), Lmethod, Lbcp);
  __ mov(O0, Lbyte_code);

  // post the breakpoint event
  __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::_breakpoint), Lmethod, Lbcp);

  // complete the execution of original bytecode
  __ dispatch_normal(vtos);
}


//----------------------------------------------------------------------------------------------------
// Exceptions

void TemplateTable::athrow() {
  transition(atos, vtos);

  // This works because the exception is cached in Otos_i, which is the same
  // register as O0, which is what throw_exception_entry expects.
  assert(Otos_i == Oexception, "see explanation above");

  __ verify_oop(Otos_i);
  __ null_check(Otos_i);
  __ throw_if_not_x(Assembler::never, Interpreter::throw_exception_entry(), G3_scratch);
}


//----------------------------------------------------------------------------------------------------
// Synchronization


// See frame_sparc.hpp for monitor block layout.
// Monitor elements are dynamically allocated by growing the stack as needed.

void TemplateTable::monitorenter() {
  transition(atos, vtos);
  __ verify_oop(Otos_i);
  // Try to acquire a lock on the object;
  // repeat until succeeded (i.e., until monitorenter returns true).

  { Label ok;
    __ tst(Otos_i);
    __ throw_if_not_1_x( Assembler::notZero, ok);
    __ delayed()->mov(Otos_i, Lscratch); // save obj
    __ throw_if_not_2( Interpreter::_throw_NullPointerException_entry, G3_scratch, ok);
  }

  assert(O0 == Otos_i, "Be sure where the object to lock is");

  // find a free slot in the monitor block

  // initialize entry pointer
  __ clr(O1); // points to free slot or NULL

  {
    Label entry, loop, exit;
    __ add( __ top_most_monitor(), O2 ); // last one to check
    __ ba( entry );
    __ delayed()->mov( Lmonitors, O3 ); // first one to check

    __ bind( loop );

    __ verify_oop(O4); // verify each monitor's oop
    __ tst(O4);        // is this entry unused?
    __ movcc( Assembler::zero, false, Assembler::ptr_cc, O3, O1);

    __ cmp(O4, O0); // check if current entry is for same object
    __ brx( Assembler::equal, false, Assembler::pn, exit );
    __ delayed()->inc( O3, frame::interpreter_frame_monitor_size() * wordSize ); // check next one

    __ bind( entry );

    __ cmp( O3, O2 );
    __ brx( Assembler::lessEqualUnsigned, true, Assembler::pt, loop );
    __ delayed()->ld_ptr(O3, BasicObjectLock::obj_offset_in_bytes(), O4);

    __ bind( exit );
  }

  { Label allocated;

    // found free slot?
    __ br_notnull_short(O1, Assembler::pn, allocated);

    __ add_monitor_to_stack( false, O2, O3 );
    __ mov(Lmonitors, O1);

    __ bind(allocated);
  }

  // Increment bcp to point to the next bytecode, so exception handling for
  // async. exceptions works correctly.
  // The object has already been popped from the stack, so the expression
  // stack looks correct.
  __ inc(Lbcp);

  __ st_ptr(O0, O1, BasicObjectLock::obj_offset_in_bytes()); // store object
  __ lock_object(O1, O0);

  // check if there's enough space on the stack for the monitors after locking
  __ generate_stack_overflow_check(0);

  // The bcp has already been incremented.  Just need to dispatch to next instruction.
  __ dispatch_next(vtos);
}
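
// monitorexit scans the same monitor block (see monitorenter above) for the
// entry whose obj field matches the object being unlocked, and passes that
// entry to unlock_object. If the scan runs off the end, no matching monitor
// exists and we throw IllegalMonitorStateException.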
void TemplateTable::monitorexit() {
  transition(atos, vtos);
  __ verify_oop(Otos_i);
  __ tst(Otos_i);
  __ throw_if_not_x( Assembler::notZero, Interpreter::_throw_NullPointerException_entry, G3_scratch );

  assert(O0 == Otos_i, "just checking");

  { Label entry, loop, found;
    __ add( __ top_most_monitor(), O2 ); // last one to check
    __ ba(entry);
    // Use Lscratch to hold the monitor element to check; start with the most
    // recent monitor. By using a local it survives the call to the C routine.
    __ delayed()->mov( Lmonitors, Lscratch );

    __ bind( loop );

    __ verify_oop(O4); // verify each monitor's oop
    __ cmp(O4, O0);    // check if current entry is for desired object
    __ brx( Assembler::equal, true, Assembler::pt, found );
    __ delayed()->mov(Lscratch, O1); // pass found entry as argument to monitorexit

    __ inc( Lscratch, frame::interpreter_frame_monitor_size() * wordSize ); // advance to next

    __ bind( entry );

    __ cmp( Lscratch, O2 );
    __ brx( Assembler::lessEqualUnsigned, true, Assembler::pt, loop );
    __ delayed()->ld_ptr(Lscratch, BasicObjectLock::obj_offset_in_bytes(), O4);

    call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_illegal_monitor_state_exception));
    __ should_not_reach_here();

    __ bind(found);
  }
  __ unlock_object(O1);
}


//----------------------------------------------------------------------------------------------------
// Wide instructions

void TemplateTable::wide() {
  transition(vtos, vtos);
  __ ldub(Lbcp, 1, G3_scratch); // get next bc
  __ sll(G3_scratch, LogBytesPerWord, G3_scratch);
  AddressLiteral ep(Interpreter::_wentry_point);
  __ set(ep, G4_scratch);
  __ ld_ptr(G4_scratch, G3_scratch, G3_scratch);
  __ jmp(G3_scratch, G0);
  __ delayed()->nop();
  // Note: the Lbcp increment step is part of the individual wide bytecode implementations
}


//----------------------------------------------------------------------------------------------------
// Multi arrays

void TemplateTable::multianewarray() {
  transition(vtos, atos);
  // put ndims * wordSize into Lscratch
  __ ldub( Lbcp, 3, Lscratch);
  __ sll( Lscratch, Interpreter::logStackElementSize, Lscratch);
  // Lesp points past the last_dim, so set O1 to the first_dim address
  __ add( Lesp, Lscratch, O1);
  call_VM(Otos_i, CAST_FROM_FN_PTR(address, InterpreterRuntime::multianewarray), O1);
  __ add( Lesp, Lscratch, Lesp); // pop all dimensions off the stack
}
#endif /* !CC_INTERP */