/*
 * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc/shared/barrierSetCodeGen.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/interpreterRuntime.hpp"
#include "interpreter/interp_masm.hpp"
#include "interpreter/templateTable.hpp"
#include "memory/universe.inline.hpp"
#include "oops/methodData.hpp"
#include "oops/objArrayKlass.hpp"
#include "oops/oop.inline.hpp"
#include "prims/methodHandles.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/synchronizer.hpp"
#include "utilities/macros.hpp"

#define __ _masm->

// Misc helpers

// Do an oop store like *(base + index + offset) = val.
// index can be noreg.
static void do_oop_store(InterpreterMacroAssembler* _masm,
                         Register base,
                         Register index,
                         int offset,
                         Register val,
                         Register tmp,
                         DecoratorSet decorators) {
  assert(tmp != val && tmp != base && tmp != index, "register collision");
  assert(index == noreg || offset == 0, "only one offset");
  BarrierSetCodeGen* code_gen = Universe::heap()->barrier_set()->code_gen();
  code_gen->store_at(_masm, decorators, T_OBJECT, base, index, offset, val, tmp);
}

static void do_oop_load(InterpreterMacroAssembler* _masm,
                        Register base,
                        Register index,
                        int offset,
                        Register dst,
                        Register tmp,
                        DecoratorSet decorators) {
  assert(tmp != dst && tmp != base && tmp != index, "register collision");
  assert(index == noreg || offset == 0, "only one offset");
  BarrierSetCodeGen* code_gen = Universe::heap()->barrier_set()->code_gen();
  code_gen->load_at(_masm, decorators, T_OBJECT, base, index, offset, dst, tmp);
}
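
// Usage note (illustrative): an array element store through the GC barrier
// interface looks like
//   do_oop_store(_masm, O1, noreg, arrayOopDesc::base_offset_in_bytes(T_OBJECT),
//                Otos_i, G3_scratch, ACCESS_ON_HEAP | ACCESS_ON_ARRAY);
// as done in TemplateTable::aastore() below; the decorators tell the
// collector's BarrierSetCodeGen which pre/post barriers to emit.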

//----------------------------------------------------------------------------------------------------
// Platform-dependent initialization

void TemplateTable::pd_initialize() {
  // (none)
}


//----------------------------------------------------------------------------------------------------
// Condition conversion
Assembler::Condition ccNot(TemplateTable::Condition cc) {
  switch (cc) {
    case TemplateTable::equal        : return Assembler::notEqual;
    case TemplateTable::not_equal    : return Assembler::equal;
    case TemplateTable::less         : return Assembler::greaterEqual;
    case TemplateTable::less_equal   : return Assembler::greater;
    case TemplateTable::greater      : return Assembler::lessEqual;
    case TemplateTable::greater_equal: return Assembler::less;
  }
  ShouldNotReachHere();
  return Assembler::zero;
}

//----------------------------------------------------------------------------------------------------
// Miscellaneous helper routines


Address TemplateTable::at_bcp(int offset) {
  assert(_desc->uses_bcp(), "inconsistent uses_bcp information");
  return Address(Lbcp, offset);
}


void TemplateTable::patch_bytecode(Bytecodes::Code bc, Register bc_reg,
                                   Register temp_reg, bool load_bc_into_bc_reg/*=true*/,
                                   int byte_no) {
  // With sharing on, may need to test Method* flag.
  if (!RewriteBytecodes) return;
  Label L_patch_done;

  switch (bc) {
  case Bytecodes::_fast_aputfield:
  case Bytecodes::_fast_bputfield:
  case Bytecodes::_fast_zputfield:
  case Bytecodes::_fast_cputfield:
  case Bytecodes::_fast_dputfield:
  case Bytecodes::_fast_fputfield:
  case Bytecodes::_fast_iputfield:
  case Bytecodes::_fast_lputfield:
  case Bytecodes::_fast_sputfield:
    {
      // We skip bytecode quickening for putfield instructions when
      // the put_code written to the constant pool cache is zero.
      // This is required so that every execution of this instruction
      // calls out to InterpreterRuntime::resolve_get_put to do
      // additional, required work.
      assert(byte_no == f1_byte || byte_no == f2_byte, "byte_no out of range");
      assert(load_bc_into_bc_reg, "we use bc_reg as temp");
      __ get_cache_and_index_and_bytecode_at_bcp(bc_reg, temp_reg, temp_reg, byte_no, 1);
      __ set(bc, bc_reg);
      __ cmp_and_br_short(temp_reg, 0, Assembler::equal, Assembler::pn, L_patch_done);  // don't patch
    }
    break;
  default:
    assert(byte_no == -1, "sanity");
    if (load_bc_into_bc_reg) {
      __ set(bc, bc_reg);
    }
  }

  if (JvmtiExport::can_post_breakpoint()) {
    Label L_fast_patch;
    __ ldub(at_bcp(0), temp_reg);
    __ cmp_and_br_short(temp_reg, Bytecodes::_breakpoint, Assembler::notEqual, Assembler::pt, L_fast_patch);
    // perform the quickening, slowly, in the bowels of the breakpoint table
    __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::set_original_bytecode_at), Lmethod, Lbcp, bc_reg);
    __ ba_short(L_patch_done);
    __ bind(L_fast_patch);
  }

#ifdef ASSERT
  Bytecodes::Code orig_bytecode = Bytecodes::java_code(bc);
  Label L_okay;
  __ ldub(at_bcp(0), temp_reg);
  __ cmp(temp_reg, orig_bytecode);
  __ br(Assembler::equal, false, Assembler::pt, L_okay);
  __ delayed()->cmp(temp_reg, bc_reg);
  __ br(Assembler::equal, false, Assembler::pt, L_okay);
  __ delayed()->nop();
  __ stop("patching the wrong bytecode");
  __ bind(L_okay);
#endif

  // patch bytecode
  __ stb(bc_reg, at_bcp(0));
  __ bind(L_patch_done);
}
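
// Illustrative example: once a field access has been resolved, the bytecode
// stream is patched in place, e.g.
//   getfield #7   ==>   fast_agetfield #7
// so later executions dispatch straight to the fast template without
// re-resolving the constant pool entry.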

//----------------------------------------------------------------------------------------------------
// Individual instructions

void TemplateTable::nop() {
  transition(vtos, vtos);
  // nothing to do
}

void TemplateTable::shouldnotreachhere() {
  transition(vtos, vtos);
  __ stop("shouldnotreachhere bytecode");
}

void TemplateTable::aconst_null() {
  transition(vtos, atos);
  __ clr(Otos_i);
}


void TemplateTable::iconst(int value) {
  transition(vtos, itos);
  __ set(value, Otos_i);
}


void TemplateTable::lconst(int value) {
  transition(vtos, ltos);
  assert(value >= 0, "check this code");
  __ set(value, Otos_l);
}


void TemplateTable::fconst(int value) {
  transition(vtos, ftos);
  static float zero = 0.0, one = 1.0, two = 2.0;
  float* p;
  switch( value ) {
   default: ShouldNotReachHere();
   case 0:  p = &zero;  break;
   case 1:  p = &one;   break;
   case 2:  p = &two;   break;
  }
  AddressLiteral a(p);
  __ sethi(a, G3_scratch);
  __ ldf(FloatRegisterImpl::S, G3_scratch, a.low10(), Ftos_f);
}


void TemplateTable::dconst(int value) {
  transition(vtos, dtos);
  static double zero = 0.0, one = 1.0;
  double* p;
  switch( value ) {
   default: ShouldNotReachHere();
   case 0:  p = &zero;  break;
   case 1:  p = &one;   break;
  }
  AddressLiteral a(p);
  __ sethi(a, G3_scratch);
  __ ldf(FloatRegisterImpl::D, G3_scratch, a.low10(), Ftos_d);
}


// %%%%% Should factor most snippet templates across platforms

void TemplateTable::bipush() {
  transition(vtos, itos);
  __ ldsb( at_bcp(1), Otos_i );
}

void TemplateTable::sipush() {
  transition(vtos, itos);
  __ get_2_byte_integer_at_bcp(1, G3_scratch, Otos_i, InterpreterMacroAssembler::Signed);
}

void TemplateTable::ldc(bool wide) {
  transition(vtos, vtos);
  Label call_ldc, notInt, isString, notString, notClass, exit;

  if (wide) {
    __ get_2_byte_integer_at_bcp(1, G3_scratch, O1, InterpreterMacroAssembler::Unsigned);
  } else {
    __ ldub(Lbcp, 1, O1);
  }
  __ get_cpool_and_tags(O0, O2);

  const int base_offset = ConstantPool::header_size() * wordSize;
  const int tags_offset = Array<u1>::base_offset_in_bytes();

  // get type from tags
  __ add(O2, tags_offset, O2);
  __ ldub(O2, O1, O2);

  // unresolved class? If so, must resolve
  __ cmp_and_brx_short(O2, JVM_CONSTANT_UnresolvedClass, Assembler::equal, Assembler::pt, call_ldc);

  // unresolved class in error state
  __ cmp_and_brx_short(O2, JVM_CONSTANT_UnresolvedClassInError, Assembler::equal, Assembler::pn, call_ldc);

  __ cmp(O2, JVM_CONSTANT_Class);      // need to call vm to get java mirror of the class
  __ brx(Assembler::notEqual, true, Assembler::pt, notClass);
  __ delayed()->add(O0, base_offset, O0);

  __ bind(call_ldc);
  __ set(wide, O1);
  call_VM(Otos_i, CAST_FROM_FN_PTR(address, InterpreterRuntime::ldc), O1);
  __ push(atos);
  __ ba_short(exit);

  __ bind(notClass);
  // __ add(O0, base_offset, O0);
  __ sll(O1, LogBytesPerWord, O1);
  __ cmp(O2, JVM_CONSTANT_Integer);
  __ brx(Assembler::notEqual, true, Assembler::pt, notInt);
  __ delayed()->cmp(O2, JVM_CONSTANT_String);
  __ ld(O0, O1, Otos_i);
  __ push(itos);
  __ ba_short(exit);

  __ bind(notInt);
  // __ cmp(O2, JVM_CONSTANT_String);
  __ brx(Assembler::notEqual, true, Assembler::pt, notString);
  __ delayed()->ldf(FloatRegisterImpl::S, O0, O1, Ftos_f);
  __ bind(isString);
  __ stop("string should be rewritten to fast_aldc");
  __ ba_short(exit);

  __ bind(notString);
  // __ ldf(FloatRegisterImpl::S, O0, O1, Ftos_f);
  __ push(ftos);

  __ bind(exit);
}

// Fast path for caching oop constants.
// %%% We should use this to handle Class and String constants also.
// %%% It will simplify the ldc/primitive path considerably.
void TemplateTable::fast_aldc(bool wide) {
  transition(vtos, atos);

  int index_size = wide ? sizeof(u2) : sizeof(u1);
  Label resolved;

  // We are resolved if the resolved reference cache entry contains a
  // non-null object (CallSite, etc.)
  assert_different_registers(Otos_i, G3_scratch);
  __ get_cache_index_at_bcp(Otos_i, G3_scratch, 1, index_size);  // load index => G3_scratch
  __ load_resolved_reference_at_index(Otos_i, G3_scratch);
  __ tst(Otos_i);
  __ br(Assembler::notEqual, false, Assembler::pt, resolved);
  __ delayed()->set((int)bytecode(), O1);

  address entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_ldc);

  // first time invocation - must resolve first
  __ call_VM(Otos_i, entry, O1);
  __ bind(resolved);
  __ verify_oop(Otos_i);
}

void TemplateTable::ldc2_w() {
  transition(vtos, vtos);
  Label Long, exit;

  __ get_2_byte_integer_at_bcp(1, G3_scratch, O1, InterpreterMacroAssembler::Unsigned);
  __ get_cpool_and_tags(O0, O2);

  const int base_offset = ConstantPool::header_size() * wordSize;
  const int tags_offset = Array<u1>::base_offset_in_bytes();
  // get type from tags
  __ add(O2, tags_offset, O2);
  __ ldub(O2, O1, O2);

  __ sll(O1, LogBytesPerWord, O1);
  __ add(O0, O1, G3_scratch);

  __ cmp_and_brx_short(O2, JVM_CONSTANT_Double, Assembler::notEqual, Assembler::pt, Long);
  // A double can be placed at word-aligned locations in the constant pool.
  // Check out Conversions.java for an example.
  // Also ConstantPool::header_size() is 20, which makes it very difficult
  // to double-align double on the constant pool. SG, 11/7/97
  __ ldf(FloatRegisterImpl::D, G3_scratch, base_offset, Ftos_d);
  __ push(dtos);
  __ ba_short(exit);

  __ bind(Long);
  __ ldx(G3_scratch, base_offset, Otos_l);
  __ push(ltos);

  __ bind(exit);
}

void TemplateTable::locals_index(Register reg, int offset) {
  __ ldub( at_bcp(offset), reg );
}

void TemplateTable::locals_index_wide(Register reg) {
  // offset is 2, not 1, because Lbcp points to wide prefix code
  __ get_2_byte_integer_at_bcp(2, G4_scratch, reg, InterpreterMacroAssembler::Unsigned);
}

void TemplateTable::iload() {
  iload_internal();
}

void TemplateTable::nofast_iload() {
  iload_internal(may_not_rewrite);
}

void TemplateTable::iload_internal(RewriteControl rc) {
  transition(vtos, itos);
  // Rewrite iload,iload  pair into fast_iload2
  //         iload,caload pair into fast_icaload
  if (RewriteFrequentPairs && rc == may_rewrite) {
    Label rewrite, done;

    // get next byte
    __ ldub(at_bcp(Bytecodes::length_for(Bytecodes::_iload)), G3_scratch);

    // if _iload, wait to rewrite to iload2. We only want to rewrite the
    // last two iloads in a pair. Comparing against fast_iload means that
    // the next bytecode is neither an iload nor a caload, and therefore
    // an iload pair.
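    // For example (illustrative): the pair "iload 4; iload 5" is first
    // rewritten to "iload 4; fast_iload 5"; on a later execution the first
    // instruction sees _fast_iload as its successor and becomes
    // fast_iload2, which pushes both locals in one template.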
    __ cmp_and_br_short(G3_scratch, (int)Bytecodes::_iload, Assembler::equal, Assembler::pn, done);

    __ cmp(G3_scratch, (int)Bytecodes::_fast_iload);
    __ br(Assembler::equal, false, Assembler::pn, rewrite);
    __ delayed()->set(Bytecodes::_fast_iload2, G4_scratch);

    __ cmp(G3_scratch, (int)Bytecodes::_caload);
    __ br(Assembler::equal, false, Assembler::pn, rewrite);
    __ delayed()->set(Bytecodes::_fast_icaload, G4_scratch);

    __ set(Bytecodes::_fast_iload, G4_scratch);  // don't check again
    // rewrite
    // G4_scratch: fast bytecode
    __ bind(rewrite);
    patch_bytecode(Bytecodes::_iload, G4_scratch, G3_scratch, false);
    __ bind(done);
  }

  // Get the local value into tos
  locals_index(G3_scratch);
  __ access_local_int( G3_scratch, Otos_i );
}

void TemplateTable::fast_iload2() {
  transition(vtos, itos);
  locals_index(G3_scratch);
  __ access_local_int( G3_scratch, Otos_i );
  __ push_i();
  locals_index(G3_scratch, 3);  // get next bytecode's local index.
  __ access_local_int( G3_scratch, Otos_i );
}

void TemplateTable::fast_iload() {
  transition(vtos, itos);
  locals_index(G3_scratch);
  __ access_local_int( G3_scratch, Otos_i );
}

void TemplateTable::lload() {
  transition(vtos, ltos);
  locals_index(G3_scratch);
  __ access_local_long( G3_scratch, Otos_l );
}


void TemplateTable::fload() {
  transition(vtos, ftos);
  locals_index(G3_scratch);
  __ access_local_float( G3_scratch, Ftos_f );
}


void TemplateTable::dload() {
  transition(vtos, dtos);
  locals_index(G3_scratch);
  __ access_local_double( G3_scratch, Ftos_d );
}


void TemplateTable::aload() {
  transition(vtos, atos);
  locals_index(G3_scratch);
  __ access_local_ptr( G3_scratch, Otos_i);
}


void TemplateTable::wide_iload() {
  transition(vtos, itos);
  locals_index_wide(G3_scratch);
  __ access_local_int( G3_scratch, Otos_i );
}


void TemplateTable::wide_lload() {
  transition(vtos, ltos);
  locals_index_wide(G3_scratch);
  __ access_local_long( G3_scratch, Otos_l );
}


void TemplateTable::wide_fload() {
  transition(vtos, ftos);
  locals_index_wide(G3_scratch);
  __ access_local_float( G3_scratch, Ftos_f );
}


void TemplateTable::wide_dload() {
  transition(vtos, dtos);
  locals_index_wide(G3_scratch);
  __ access_local_double( G3_scratch, Ftos_d );
}


void TemplateTable::wide_aload() {
  transition(vtos, atos);
  locals_index_wide(G3_scratch);
  __ access_local_ptr( G3_scratch, Otos_i );
  __ verify_oop(Otos_i);
}


void TemplateTable::iaload() {
  transition(itos, itos);
  // Otos_i: index
  // tos: array
  __ index_check(O2, Otos_i, LogBytesPerInt, G3_scratch, O3);
  __ ld(O3, arrayOopDesc::base_offset_in_bytes(T_INT), Otos_i);
}


void TemplateTable::laload() {
  transition(itos, ltos);
  // Otos_i: index
  // O2: array
  __ index_check(O2, Otos_i, LogBytesPerLong, G3_scratch, O3);
  __ ld_long(O3, arrayOopDesc::base_offset_in_bytes(T_LONG), Otos_l);
}


void TemplateTable::faload() {
  transition(itos, ftos);
  // Otos_i: index
  // O2: array
  __ index_check(O2, Otos_i, LogBytesPerInt, G3_scratch, O3);
  __ ldf(FloatRegisterImpl::S, O3, arrayOopDesc::base_offset_in_bytes(T_FLOAT), Ftos_f);
}


void TemplateTable::daload() {
  transition(itos, dtos);
  // Otos_i: index
  // O2: array
  __ index_check(O2, Otos_i, LogBytesPerLong, G3_scratch, O3);
  __ ldf(FloatRegisterImpl::D, O3, arrayOopDesc::base_offset_in_bytes(T_DOUBLE), Ftos_d);
}


void TemplateTable::aaload() {
  transition(itos, atos);
  // Otos_i: index
  // tos: array
  __ index_check(O2, Otos_i, UseCompressedOops ? 2 : LogBytesPerWord, G3_scratch, O3);
  do_oop_load(_masm,
              O3,
              noreg,
              arrayOopDesc::base_offset_in_bytes(T_OBJECT),
              Otos_i,
              G3_scratch,
              ACCESS_ON_HEAP | ACCESS_ON_ARRAY);
  __ verify_oop(Otos_i);
}


void TemplateTable::baload() {
  transition(itos, itos);
  // Otos_i: index
  // tos: array
  __ index_check(O2, Otos_i, 0, G3_scratch, O3);
  __ ldsb(O3, arrayOopDesc::base_offset_in_bytes(T_BYTE), Otos_i);
}


void TemplateTable::caload() {
  transition(itos, itos);
  // Otos_i: index
  // tos: array
  __ index_check(O2, Otos_i, LogBytesPerShort, G3_scratch, O3);
  __ lduh(O3, arrayOopDesc::base_offset_in_bytes(T_CHAR), Otos_i);
}

void TemplateTable::fast_icaload() {
  transition(vtos, itos);
  // Otos_i: index
  // tos: array
  locals_index(G3_scratch);
  __ access_local_int( G3_scratch, Otos_i );
  __ index_check(O2, Otos_i, LogBytesPerShort, G3_scratch, O3);
  __ lduh(O3, arrayOopDesc::base_offset_in_bytes(T_CHAR), Otos_i);
}


void TemplateTable::saload() {
  transition(itos, itos);
  // Otos_i: index
  // tos: array
  __ index_check(O2, Otos_i, LogBytesPerShort, G3_scratch, O3);
  __ ldsh(O3, arrayOopDesc::base_offset_in_bytes(T_SHORT), Otos_i);
}


void TemplateTable::iload(int n) {
  transition(vtos, itos);
  __ ld( Llocals, Interpreter::local_offset_in_bytes(n), Otos_i );
}


void TemplateTable::lload(int n) {
  transition(vtos, ltos);
  assert(n+1 < Argument::n_register_parameters, "would need more code");
  __ load_unaligned_long(Llocals, Interpreter::local_offset_in_bytes(n+1), Otos_l);
}


void TemplateTable::fload(int n) {
  transition(vtos, ftos);
  assert(n < Argument::n_register_parameters, "would need more code");
  __ ldf( FloatRegisterImpl::S, Llocals, Interpreter::local_offset_in_bytes(n), Ftos_f );
}


void TemplateTable::dload(int n) {
  transition(vtos, dtos);
  FloatRegister dst = Ftos_d;
  __ load_unaligned_double(Llocals, Interpreter::local_offset_in_bytes(n+1), dst);
}


void TemplateTable::aload(int n) {
  transition(vtos, atos);
  __ ld_ptr( Llocals, Interpreter::local_offset_in_bytes(n), Otos_i );
}

void TemplateTable::aload_0() {
  aload_0_internal();
}

void TemplateTable::nofast_aload_0() {
  aload_0_internal(may_not_rewrite);
}

void TemplateTable::aload_0_internal(RewriteControl rc) {
  transition(vtos, atos);

  // According to bytecode histograms, the pairs:
  //
  // _aload_0, _fast_igetfield (itos)
  // _aload_0, _fast_agetfield (atos)
  // _aload_0, _fast_fgetfield (ftos)
  //
  // occur frequently. If RewriteFrequentPairs is set, the (slow) _aload_0
  // bytecode checks the next bytecode and then rewrites the current
  // bytecode into a pair bytecode; otherwise it rewrites the current
  // bytecode into _fast_aload_0 that doesn't do the pair check anymore.
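  // For example (illustrative): a getter body "return this.x;" compiles to
  //   aload_0; getfield #x; ireturn
  // and, once the getfield has been quickened to _fast_igetfield, the
  // aload_0 is rewritten here to _fast_iaccess_0.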
638 // 639 if (RewriteFrequentPairs && rc == may_rewrite) { 640 Label rewrite, done; 641 642 // get next byte 643 __ ldub(at_bcp(Bytecodes::length_for(Bytecodes::_aload_0)), G3_scratch); 644 645 // if _getfield then wait with rewrite 646 __ cmp_and_br_short(G3_scratch, (int)Bytecodes::_getfield, Assembler::equal, Assembler::pn, done); 647 648 // if _igetfield then rewrite to _fast_iaccess_0 649 assert(Bytecodes::java_code(Bytecodes::_fast_iaccess_0) == Bytecodes::_aload_0, "adjust fast bytecode def"); 650 __ cmp(G3_scratch, (int)Bytecodes::_fast_igetfield); 651 __ br(Assembler::equal, false, Assembler::pn, rewrite); 652 __ delayed()->set(Bytecodes::_fast_iaccess_0, G4_scratch); 653 654 // if _agetfield then rewrite to _fast_aaccess_0 655 assert(Bytecodes::java_code(Bytecodes::_fast_aaccess_0) == Bytecodes::_aload_0, "adjust fast bytecode def"); 656 __ cmp(G3_scratch, (int)Bytecodes::_fast_agetfield); 657 __ br(Assembler::equal, false, Assembler::pn, rewrite); 658 __ delayed()->set(Bytecodes::_fast_aaccess_0, G4_scratch); 659 660 // if _fgetfield then rewrite to _fast_faccess_0 661 assert(Bytecodes::java_code(Bytecodes::_fast_faccess_0) == Bytecodes::_aload_0, "adjust fast bytecode def"); 662 __ cmp(G3_scratch, (int)Bytecodes::_fast_fgetfield); 663 __ br(Assembler::equal, false, Assembler::pn, rewrite); 664 __ delayed()->set(Bytecodes::_fast_faccess_0, G4_scratch); 665 666 // else rewrite to _fast_aload0 667 assert(Bytecodes::java_code(Bytecodes::_fast_aload_0) == Bytecodes::_aload_0, "adjust fast bytecode def"); 668 __ set(Bytecodes::_fast_aload_0, G4_scratch); 669 670 // rewrite 671 // G4_scratch: fast bytecode 672 __ bind(rewrite); 673 patch_bytecode(Bytecodes::_aload_0, G4_scratch, G3_scratch, false); 674 __ bind(done); 675 } 676 677 // Do actual aload_0 (must do this after patch_bytecode which might call VM and GC might change oop). 
  aload(0);
}

void TemplateTable::istore() {
  transition(itos, vtos);
  locals_index(G3_scratch);
  __ store_local_int( G3_scratch, Otos_i );
}


void TemplateTable::lstore() {
  transition(ltos, vtos);
  locals_index(G3_scratch);
  __ store_local_long( G3_scratch, Otos_l );
}


void TemplateTable::fstore() {
  transition(ftos, vtos);
  locals_index(G3_scratch);
  __ store_local_float( G3_scratch, Ftos_f );
}


void TemplateTable::dstore() {
  transition(dtos, vtos);
  locals_index(G3_scratch);
  __ store_local_double( G3_scratch, Ftos_d );
}


void TemplateTable::astore() {
  transition(vtos, vtos);
  __ load_ptr(0, Otos_i);
  __ inc(Lesp, Interpreter::stackElementSize);
  __ verify_oop_or_return_address(Otos_i, G3_scratch);
  locals_index(G3_scratch);
  __ store_local_ptr(G3_scratch, Otos_i);
}


void TemplateTable::wide_istore() {
  transition(vtos, vtos);
  __ pop_i();
  locals_index_wide(G3_scratch);
  __ store_local_int( G3_scratch, Otos_i );
}


void TemplateTable::wide_lstore() {
  transition(vtos, vtos);
  __ pop_l();
  locals_index_wide(G3_scratch);
  __ store_local_long( G3_scratch, Otos_l );
}


void TemplateTable::wide_fstore() {
  transition(vtos, vtos);
  __ pop_f();
  locals_index_wide(G3_scratch);
  __ store_local_float( G3_scratch, Ftos_f );
}


void TemplateTable::wide_dstore() {
  transition(vtos, vtos);
  __ pop_d();
  locals_index_wide(G3_scratch);
  __ store_local_double( G3_scratch, Ftos_d );
}


void TemplateTable::wide_astore() {
  transition(vtos, vtos);
  __ load_ptr(0, Otos_i);
  __ inc(Lesp, Interpreter::stackElementSize);
  __ verify_oop_or_return_address(Otos_i, G3_scratch);
  locals_index_wide(G3_scratch);
  __ store_local_ptr(G3_scratch, Otos_i);
}


void TemplateTable::iastore() {
  transition(itos, vtos);
  __ pop_i(O2); // index
  // Otos_i: val
  // O3: array
  __ index_check(O3, O2, LogBytesPerInt, G3_scratch, O2);
  __ st(Otos_i, O2, arrayOopDesc::base_offset_in_bytes(T_INT));
}


void TemplateTable::lastore() {
  transition(ltos, vtos);
  __ pop_i(O2); // index
  // Otos_l: val
  // O3: array
  __ index_check(O3, O2, LogBytesPerLong, G3_scratch, O2);
  __ st_long(Otos_l, O2, arrayOopDesc::base_offset_in_bytes(T_LONG));
}


void TemplateTable::fastore() {
  transition(ftos, vtos);
  __ pop_i(O2); // index
  // Ftos_f: val
  // O3: array
  __ index_check(O3, O2, LogBytesPerInt, G3_scratch, O2);
  __ stf(FloatRegisterImpl::S, Ftos_f, O2, arrayOopDesc::base_offset_in_bytes(T_FLOAT));
}


void TemplateTable::dastore() {
  transition(dtos, vtos);
  __ pop_i(O2); // index
  // Ftos_d: val
  // O3: array
  __ index_check(O3, O2, LogBytesPerLong, G3_scratch, O2);
  __ stf(FloatRegisterImpl::D, Ftos_d, O2, arrayOopDesc::base_offset_in_bytes(T_DOUBLE));
}

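// aastore must dynamically type-check the stored value: e.g. storing a
// String into an Object[] succeeds, while storing an Integer into a
// String[] must throw ArrayStoreException (see the subtype check below).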
void TemplateTable::aastore() {
  Label store_ok, is_null, done;
  transition(vtos, vtos);
  __ ld_ptr(Lesp, Interpreter::expr_offset_in_bytes(0), Otos_i);
  __ ld(Lesp, Interpreter::expr_offset_in_bytes(1), O2);         // get index
  __ ld_ptr(Lesp, Interpreter::expr_offset_in_bytes(2), O3);     // get array
  // Otos_i: val
  // O2: index
  // O3: array
  __ verify_oop(Otos_i);
  __ index_check_without_pop(O3, O2, UseCompressedOops ? 2 : LogBytesPerWord, G3_scratch, O1);

  // do array store check - check for NULL value first
  __ br_null_short( Otos_i, Assembler::pn, is_null );

  __ load_klass(O3, O4);     // get array klass
  __ load_klass(Otos_i, O5); // get value klass

  // do fast instanceof cache test

  __ ld_ptr(O4, in_bytes(ObjArrayKlass::element_klass_offset()), O4);

  assert(Otos_i == O0, "just checking");

  // Otos_i:    value
  // O1:        addr - offset
  // O2:        index
  // O3:        array
  // O4:        array element klass
  // O5:        value klass

  // Address element(O1, 0, arrayOopDesc::base_offset_in_bytes(T_OBJECT));

  // Generate a fast subtype check. Branch to store_ok if no
  // failure. Throw if failure.
  __ gen_subtype_check( O5, O4, G3_scratch, G4_scratch, G1_scratch, store_ok );

  // Not a subtype; so must throw exception
  __ throw_if_not_x( Assembler::never, Interpreter::_throw_ArrayStoreException_entry, G3_scratch );

  // Store is OK.
  __ bind(store_ok);
  do_oop_store(_masm, O1, noreg, arrayOopDesc::base_offset_in_bytes(T_OBJECT), Otos_i, G3_scratch, ACCESS_ON_HEAP | ACCESS_ON_ARRAY);

  __ ba(done);
  __ delayed()->inc(Lesp, 3 * Interpreter::stackElementSize); // adj sp (pops array, index and value)

  __ bind(is_null);
  do_oop_store(_masm, O1, noreg, arrayOopDesc::base_offset_in_bytes(T_OBJECT), G0, G4_scratch, ACCESS_ON_HEAP | ACCESS_ON_ARRAY);

  __ profile_null_seen(G3_scratch);
  __ inc(Lesp, 3 * Interpreter::stackElementSize); // adj sp (pops array, index and value)
  __ bind(done);
}


void TemplateTable::bastore() {
  transition(itos, vtos);
  __ pop_i(O2); // index
  // Otos_i: val
  // O2: index
  // O3: array
  __ index_check(O3, O2, 0, G3_scratch, O2);
  // Need to check whether array is boolean or byte
  // since both types share the bastore bytecode.
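  // (The layout helpers of boolean[] and byte[] arrays differ in exactly
  // one bit, so a single and-test identifies T_BOOLEAN; e.g. storing the
  // int value 2 into a boolean[] then stores 2 & 1 == 0.)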
  __ load_klass(O3, G4_scratch);
  __ ld(G4_scratch, in_bytes(Klass::layout_helper_offset()), G4_scratch);
  __ set(Klass::layout_helper_boolean_diffbit(), G3_scratch);
  __ andcc(G3_scratch, G4_scratch, G0);
  Label L_skip;
  __ br(Assembler::zero, false, Assembler::pn, L_skip);
  __ delayed()->nop();
  __ and3(Otos_i, 1, Otos_i);  // if it is a T_BOOLEAN array, mask the stored value to 0/1
  __ bind(L_skip);
  __ stb(Otos_i, O2, arrayOopDesc::base_offset_in_bytes(T_BYTE));
}


void TemplateTable::castore() {
  transition(itos, vtos);
  __ pop_i(O2); // index
  // Otos_i: val
  // O3: array
  __ index_check(O3, O2, LogBytesPerShort, G3_scratch, O2);
  __ sth(Otos_i, O2, arrayOopDesc::base_offset_in_bytes(T_CHAR));
}


void TemplateTable::sastore() {
  // %%%%% Factor across platform
  castore();
}


void TemplateTable::istore(int n) {
  transition(itos, vtos);
  __ st(Otos_i, Llocals, Interpreter::local_offset_in_bytes(n));
}


void TemplateTable::lstore(int n) {
  transition(ltos, vtos);
  assert(n+1 < Argument::n_register_parameters, "only handle register cases");
  __ store_unaligned_long(Otos_l, Llocals, Interpreter::local_offset_in_bytes(n+1));
}


void TemplateTable::fstore(int n) {
  transition(ftos, vtos);
  assert(n < Argument::n_register_parameters, "only handle register cases");
  __ stf(FloatRegisterImpl::S, Ftos_f, Llocals, Interpreter::local_offset_in_bytes(n));
}


void TemplateTable::dstore(int n) {
  transition(dtos, vtos);
  FloatRegister src = Ftos_d;
  __ store_unaligned_double(src, Llocals, Interpreter::local_offset_in_bytes(n+1));
}


void TemplateTable::astore(int n) {
  transition(vtos, vtos);
  __ load_ptr(0, Otos_i);
  __ inc(Lesp, Interpreter::stackElementSize);
  __ verify_oop_or_return_address(Otos_i, G3_scratch);
  __ store_local_ptr(n, Otos_i);
}


void TemplateTable::pop() {
  transition(vtos, vtos);
  __ inc(Lesp, Interpreter::stackElementSize);
}


void TemplateTable::pop2() {
  transition(vtos, vtos);
  __ inc(Lesp, 2 * Interpreter::stackElementSize);
}


void TemplateTable::dup() {
  transition(vtos, vtos);
  // stack: ..., a
  // load a and tag
  __ load_ptr(0, Otos_i);
  __ push_ptr(Otos_i);
  // stack: ..., a, a
}


void TemplateTable::dup_x1() {
  transition(vtos, vtos);
  // stack: ..., a, b
  __ load_ptr( 1, G3_scratch);  // get a
  __ load_ptr( 0, Otos_l1);     // get b
  __ store_ptr(1, Otos_l1);     // put b
  __ store_ptr(0, G3_scratch);  // put a - like swap
  __ push_ptr(Otos_l1);         // push b
  // stack: ..., b, a, b
}


void TemplateTable::dup_x2() {
  transition(vtos, vtos);
  // stack: ..., a, b, c
  // get c and push on stack, reuse registers
  __ load_ptr( 0, G3_scratch);  // get c
  __ push_ptr(G3_scratch);      // push c with tag
  // stack: ..., a, b, c, c  (c in reg)  (Lesp - 4)
  // (stack offsets n+1 now)
  __ load_ptr( 3, Otos_l1);     // get a
  __ store_ptr(3, G3_scratch);  // put c at 3
  // stack: ..., c, b, c, c  (a in reg)
  __ load_ptr( 2, G3_scratch);  // get b
  __ store_ptr(2, Otos_l1);     // put a at 2
  // stack: ..., c, a, c, c  (b in reg)
  __ store_ptr(1, G3_scratch);  // put b at 1
  // stack: ..., c, a, b, c
}


void TemplateTable::dup2() {
  transition(vtos, vtos);
  __ load_ptr(1, G3_scratch);  // get a
  __ load_ptr(0, Otos_l1);     // get b
  __ push_ptr(G3_scratch);     // push a
  __ push_ptr(Otos_l1);        // push b
  // stack: ..., a, b, a, b
}


void TemplateTable::dup2_x1() {
  transition(vtos, vtos);
  // stack: ..., a, b, c
  __ load_ptr( 1, Lscratch);    // get b
  __ load_ptr( 2, Otos_l1);     // get a
  __ store_ptr(2, Lscratch);    // put b at a
  // stack: ..., b, b, c
  __ load_ptr( 0, G3_scratch);  // get c
  __ store_ptr(1, G3_scratch);  // put c at b
  // stack: ..., b, c, c
  __ store_ptr(0, Otos_l1);     // put a at c
  // stack: ..., b, c, a
  __ push_ptr(Lscratch);        // push b
  __ push_ptr(G3_scratch);      // push c
  // stack: ..., b, c, a, b, c
}


// The spec says that these types can be a mixture of category 1 (1 word)
// types and/or category 2 types (longs and doubles)
void TemplateTable::dup2_x2() {
  transition(vtos, vtos);
  // stack: ..., a, b, c, d
  __ load_ptr( 1, Lscratch);    // get c
  __ load_ptr( 3, Otos_l1);     // get a
  __ store_ptr(3, Lscratch);    // put c at 3
  __ store_ptr(1, Otos_l1);     // put a at 1
  // stack: ..., c, b, a, d
  __ load_ptr( 2, G3_scratch);  // get b
  __ load_ptr( 0, Otos_l1);     // get d
  __ store_ptr(0, G3_scratch);  // put b at 0
  __ store_ptr(2, Otos_l1);     // put d at 2
  // stack: ..., c, d, a, b
  __ push_ptr(Lscratch);        // push c
  __ push_ptr(Otos_l1);         // push d
  // stack: ..., c, d, a, b, c, d
}


void TemplateTable::swap() {
  transition(vtos, vtos);
  // stack: ..., a, b
  __ load_ptr( 1, G3_scratch);  // get a
  __ load_ptr( 0, Otos_l1);     // get b
  __ store_ptr(0, G3_scratch);  // put b
  __ store_ptr(1, Otos_l1);     // put a
  // stack: ..., b, a
}


void TemplateTable::iop2(Operation op) {
  transition(itos, itos);
  __ pop_i(O1);
  switch (op) {
   case  add:  __  add(O1, Otos_i, Otos_i);  break;
   case  sub:  __  sub(O1, Otos_i, Otos_i);  break;
     // %%%%% Mul may not exist: better to call .mul?
   case  mul:  __ smul(O1, Otos_i, Otos_i);  break;
   case _and:  __ and3(O1, Otos_i, Otos_i);  break;
   case  _or:  __  or3(O1, Otos_i, Otos_i);  break;
   case _xor:  __ xor3(O1, Otos_i, Otos_i);  break;
   case  shl:  __  sll(O1, Otos_i, Otos_i);  break;
   case  shr:  __  sra(O1, Otos_i, Otos_i);  break;
   case ushr:  __  srl(O1, Otos_i, Otos_i);  break;
   default: ShouldNotReachHere();
  }
}


void TemplateTable::lop2(Operation op) {
  transition(ltos, ltos);
  __ pop_l(O2);
  switch (op) {
   case  add:  __  add(O2, Otos_l, Otos_l);  break;
   case  sub:  __  sub(O2, Otos_l, Otos_l);  break;
   case _and:  __ and3(O2, Otos_l, Otos_l);  break;
   case  _or:  __  or3(O2, Otos_l, Otos_l);  break;
   case _xor:  __ xor3(O2, Otos_l, Otos_l);  break;
   default: ShouldNotReachHere();
  }
}


void TemplateTable::idiv() {
  // %%%%% Later: for SPARC/V7 call .sdiv library routine,
  // %%%%% Use ldsw...sdivx on pure V9 ABI. 64 bit safe.
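  // Note: the one special case is min_int / -1, which overflows; the code
  // below detects it and returns the dividend (min_int) instead of
  // executing sdiv.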
  transition(itos, itos);
  __ pop_i(O1); // get 1st op

  // Y contains upper 32 bits of result, set it to 0 or all ones
  __ wry(G0);
  __ mov(~0, G3_scratch);

  __ tst(O1);
  Label neg;
  __ br(Assembler::negative, true, Assembler::pn, neg);
  __ delayed()->wry(G3_scratch);
  __ bind(neg);

  Label ok;
  __ tst(Otos_i);
  __ throw_if_not_icc( Assembler::notZero, Interpreter::_throw_ArithmeticException_entry, G3_scratch );

  const int min_int = 0x80000000;
  Label regular;
  __ cmp(Otos_i, -1);
  __ br(Assembler::notEqual, false, Assembler::pt, regular);
  // Don't put set in delay slot
  // Set will turn into multiple instructions in 64 bit mode
  __ delayed()->nop();
  __ set(min_int, G4_scratch);
  Label done;
  __ cmp(O1, G4_scratch);
  __ br(Assembler::equal, true, Assembler::pt, done);
  __ delayed()->mov(O1, Otos_i);  // (mov only executed if branch taken)

  __ bind(regular);
  __ sdiv(O1, Otos_i, Otos_i);  // note: irem uses O1 after this instruction!
  __ bind(done);
}


void TemplateTable::irem() {
  transition(itos, itos);
  __ mov(Otos_i, O2);  // save divisor
  idiv();              // %%%% Hack: exploits fact that idiv leaves dividend in O1
  __ smul(Otos_i, O2, Otos_i);
  __ sub(O1, Otos_i, Otos_i);
}


void TemplateTable::lmul() {
  transition(ltos, ltos);
  __ pop_l(O2);
  __ mulx(Otos_l, O2, Otos_l);
}


void TemplateTable::ldiv() {
  transition(ltos, ltos);

  // check for zero
  __ pop_l(O2);
  __ tst(Otos_l);
  __ throw_if_not_xcc( Assembler::notZero, Interpreter::_throw_ArithmeticException_entry, G3_scratch);
  __ sdivx(O2, Otos_l, Otos_l);
}


void TemplateTable::lrem() {
  transition(ltos, ltos);

  // check for zero
  __ pop_l(O2);
  __ tst(Otos_l);
  __ throw_if_not_xcc( Assembler::notZero, Interpreter::_throw_ArithmeticException_entry, G3_scratch);
  __ sdivx(O2, Otos_l, Otos_l2);
  __ mulx (Otos_l2, Otos_l, Otos_l2);
  __ sub  (O2, Otos_l2, Otos_l);
}


void TemplateTable::lshl() {
  transition(itos, ltos);  // %%%% could optimize, fill delay slot or opt for ultra

  __ pop_l(O2);  // shift value in O2, O3
  __ sllx(O2, Otos_i, Otos_l);
}


void TemplateTable::lshr() {
  transition(itos, ltos);  // %%%% see lshl comment

  __ pop_l(O2);  // shift value in O2, O3
  __ srax(O2, Otos_i, Otos_l);
}


void TemplateTable::lushr() {
  transition(itos, ltos);  // %%%% see lshl comment

  __ pop_l(O2);  // shift value in O2, O3
  __ srlx(O2, Otos_i, Otos_l);
}


void TemplateTable::fop2(Operation op) {
  transition(ftos, ftos);
  switch (op) {
   case add:  __ pop_f(F4); __ fadd(FloatRegisterImpl::S, F4, Ftos_f, Ftos_f);  break;
   case sub:  __ pop_f(F4); __ fsub(FloatRegisterImpl::S, F4, Ftos_f, Ftos_f);  break;
   case mul:  __ pop_f(F4); __ fmul(FloatRegisterImpl::S, F4, Ftos_f, Ftos_f);  break;
   case div:  __ pop_f(F4); __ fdiv(FloatRegisterImpl::S, F4, Ftos_f, Ftos_f);  break;
   case rem:
     assert(Ftos_f == F0, "just checking");
     // LP64 calling conventions use F1, F3 for passing 2 floats
     __ pop_f(F1);
     __ fmov(FloatRegisterImpl::S, Ftos_f, F3);
     __ call_VM_leaf(Lscratch, CAST_FROM_FN_PTR(address, SharedRuntime::frem));
     assert( Ftos_f == F0, "fix this code" );
     break;

   default: ShouldNotReachHere();
  }
}


void TemplateTable::dop2(Operation op) {
  transition(dtos, dtos);
  switch (op) {
   case add:  __ pop_d(F4); __ fadd(FloatRegisterImpl::D, F4, Ftos_d, Ftos_d);  break;
   case sub:  __ pop_d(F4); __ fsub(FloatRegisterImpl::D, F4, Ftos_d, Ftos_d);  break;
   case mul:  __ pop_d(F4); __ fmul(FloatRegisterImpl::D, F4, Ftos_d, Ftos_d);  break;
   case div:  __ pop_d(F4); __ fdiv(FloatRegisterImpl::D, F4, Ftos_d, Ftos_d);  break;
   case rem:
     // Pass arguments in D0, D2
     __ fmov(FloatRegisterImpl::D, Ftos_f, F2 );
     __ pop_d( F0 );
     __ call_VM_leaf(Lscratch, CAST_FROM_FN_PTR(address, SharedRuntime::drem));
     assert( Ftos_d == F0, "fix this code" );
     break;

   default: ShouldNotReachHere();
  }
}


void TemplateTable::ineg() {
  transition(itos, itos);
  __ neg(Otos_i);
}


void TemplateTable::lneg() {
  transition(ltos, ltos);
  __ sub(G0, Otos_l, Otos_l);
}


void TemplateTable::fneg() {
  transition(ftos, ftos);
  __ fneg(FloatRegisterImpl::S, Ftos_f, Ftos_f);
}


void TemplateTable::dneg() {
  transition(dtos, dtos);
  __ fneg(FloatRegisterImpl::D, Ftos_f, Ftos_f);
}


void TemplateTable::iinc() {
  transition(vtos, vtos);
  locals_index(G3_scratch);
  __ ldsb(Lbcp, 2, O2);  // load constant
  __ access_local_int(G3_scratch, Otos_i);
  __ add(Otos_i, O2, Otos_i);
  __ st(Otos_i, G3_scratch, 0);  // access_local_int puts E.A. in G3_scratch
}
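
// (Illustrative: "iinc 4, -1" -- the i-- of a loop -- decrements local #4 in
// place; the signed byte at bcp+2 is the increment. wide_iinc below is the
// same with 16-bit index and increment.)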

void TemplateTable::wide_iinc() {
  transition(vtos, vtos);
  locals_index_wide(G3_scratch);
  __ get_2_byte_integer_at_bcp( 4, O2, O3, InterpreterMacroAssembler::Signed);
  __ access_local_int(G3_scratch, Otos_i);
  __ add(Otos_i, O3, Otos_i);
  __ st(Otos_i, G3_scratch, 0);  // access_local_int puts E.A. in G3_scratch
}


void TemplateTable::convert() {
  // %%%%% Factor this first part across platforms
#ifdef ASSERT
  TosState tos_in  = ilgl;
  TosState tos_out = ilgl;
  switch (bytecode()) {
    case Bytecodes::_i2l: // fall through
    case Bytecodes::_i2f: // fall through
    case Bytecodes::_i2d: // fall through
    case Bytecodes::_i2b: // fall through
    case Bytecodes::_i2c: // fall through
    case Bytecodes::_i2s: tos_in = itos; break;
    case Bytecodes::_l2i: // fall through
    case Bytecodes::_l2f: // fall through
    case Bytecodes::_l2d: tos_in = ltos; break;
    case Bytecodes::_f2i: // fall through
    case Bytecodes::_f2l: // fall through
    case Bytecodes::_f2d: tos_in = ftos; break;
    case Bytecodes::_d2i: // fall through
    case Bytecodes::_d2l: // fall through
    case Bytecodes::_d2f: tos_in = dtos; break;
    default             : ShouldNotReachHere();
  }
  switch (bytecode()) {
    case Bytecodes::_l2i: // fall through
    case Bytecodes::_f2i: // fall through
    case Bytecodes::_d2i: // fall through
    case Bytecodes::_i2b: // fall through
    case Bytecodes::_i2c: // fall through
    case Bytecodes::_i2s: tos_out = itos; break;
    case Bytecodes::_i2l: // fall through
    case Bytecodes::_f2l: // fall through
    case Bytecodes::_d2l: tos_out = ltos; break;
    case Bytecodes::_i2f: // fall through
    case Bytecodes::_l2f: // fall through
    case Bytecodes::_d2f: tos_out = ftos; break;
    case Bytecodes::_i2d: // fall through
    case Bytecodes::_l2d: // fall through
    case Bytecodes::_f2d: tos_out = dtos; break;
    default             : ShouldNotReachHere();
  }
  transition(tos_in, tos_out);
#endif

  // Conversion
  Label done;
  switch (bytecode()) {
   case Bytecodes::_i2l:
    // Sign extend the 32 bits
    __ sra ( Otos_i, 0, Otos_l );
    break;

   case Bytecodes::_i2f:
    __ st(Otos_i, __ d_tmp );
    __ ldf(FloatRegisterImpl::S, __ d_tmp, F0);
    __ fitof(FloatRegisterImpl::S, F0, Ftos_f);
    break;

   case Bytecodes::_i2d:
    __ st(Otos_i, __ d_tmp);
    __ ldf(FloatRegisterImpl::S, __ d_tmp, F0);
    __ fitof(FloatRegisterImpl::D, F0, Ftos_f);
    break;

   case Bytecodes::_i2b:
    __ sll(Otos_i, 24, Otos_i);
    __ sra(Otos_i, 24, Otos_i);
    break;

   case Bytecodes::_i2c:
    __ sll(Otos_i, 16, Otos_i);
    __ srl(Otos_i, 16, Otos_i);
    break;

   case Bytecodes::_i2s:
    __ sll(Otos_i, 16, Otos_i);
    __ sra(Otos_i, 16, Otos_i);
    break;

   case Bytecodes::_l2i:
    // Sign-extend into the high 32 bits
    __ sra(Otos_l, 0, Otos_i);
    break;

   case Bytecodes::_l2f:
   case Bytecodes::_l2d:
    __ st_long(Otos_l, __ d_tmp);
    __ ldf(FloatRegisterImpl::D, __ d_tmp, Ftos_d);

    if (bytecode() == Bytecodes::_l2f) {
      __ fxtof(FloatRegisterImpl::S, Ftos_d, Ftos_f);
    } else {
      __ fxtof(FloatRegisterImpl::D, Ftos_d, Ftos_d);
    }
    break;

   case Bytecodes::_f2i: {
      Label isNaN;
      // result must be 0 if value is NaN; test by comparing value to itself
      __ fcmp(FloatRegisterImpl::S, Assembler::fcc0, Ftos_f, Ftos_f);
      __ fb(Assembler::f_unordered, true, Assembler::pn, isNaN);
      __ delayed()->clr(Otos_i);  // NaN
      __ ftoi(FloatRegisterImpl::S, Ftos_f, F30);
      __ stf(FloatRegisterImpl::S, F30, __ d_tmp);
      __ ld(__ d_tmp, Otos_i);
      __ bind(isNaN);
    }
    break;

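   // The remaining float/double -> integral conversions are done out of
   // line in SharedRuntime; e.g. (long)Float.NaN must yield 0L, and
   // SharedRuntime::f2l implements those JLS edge cases.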
   case Bytecodes::_f2l:
    // must uncache tos
    __ push_f();
    __ pop_f(F1);
    __ call_VM_leaf(Lscratch, CAST_FROM_FN_PTR(address, SharedRuntime::f2l));
    break;

   case Bytecodes::_f2d:
    __ ftof( FloatRegisterImpl::S, FloatRegisterImpl::D, Ftos_f, Ftos_f);
    break;

   case Bytecodes::_d2i:
   case Bytecodes::_d2l:
    // must uncache tos
    __ push_d();
    // LP64 calling conventions pass first double arg in D0
    __ pop_d( Ftos_d );
    __ call_VM_leaf(Lscratch,
        bytecode() == Bytecodes::_d2i
          ? CAST_FROM_FN_PTR(address, SharedRuntime::d2i)
          : CAST_FROM_FN_PTR(address, SharedRuntime::d2l));
    break;

   case Bytecodes::_d2f:
    __ ftof( FloatRegisterImpl::D, FloatRegisterImpl::S, Ftos_d, Ftos_f);
    break;

   default: ShouldNotReachHere();
  }
  __ bind(done);
}


void TemplateTable::lcmp() {
  transition(ltos, itos);

  __ pop_l(O1); // pop off value 1, value 2 is in O0
  __ lcmp( O1, Otos_l, Otos_i );
}


void TemplateTable::float_cmp(bool is_float, int unordered_result) {

  if (is_float) __ pop_f(F2);
  else          __ pop_d(F2);

  assert(Ftos_f == F0 && Ftos_d == F0, "alias checking:");

  __ float_cmp( is_float, unordered_result, F2, F0, Otos_i );
}

void TemplateTable::branch(bool is_jsr, bool is_wide) {
  // Note: on SPARC, we use InterpreterMacroAssembler::if_cmp also.
  __ verify_thread();

  const Register O2_bumped_count = O2;
  __ profile_taken_branch(G3_scratch, O2_bumped_count);

  // get (wide) offset to O1_disp
  const Register O1_disp = O1;
  if (is_wide)  __ get_4_byte_integer_at_bcp( 1, G4_scratch, O1_disp, InterpreterMacroAssembler::set_CC);
  else          __ get_2_byte_integer_at_bcp( 1, G4_scratch, O1_disp, InterpreterMacroAssembler::Signed, InterpreterMacroAssembler::set_CC);

  // Handle all the JSR stuff here, then exit.
  // It's much shorter and cleaner than intermingling with the
  // non-JSR normal-branch stuff occurring below.
  if( is_jsr ) {
    // compute return address as bci in Otos_i
    __ ld_ptr(Lmethod, Method::const_offset(), G3_scratch);
    __ sub(Lbcp, G3_scratch, G3_scratch);
    __ sub(G3_scratch, in_bytes(ConstMethod::codes_offset()) - (is_wide ? 5 : 3), Otos_i);

    // Bump Lbcp to target of JSR
    __ add(Lbcp, O1_disp, Lbcp);
    // Push returnAddress for "ret" on stack
    __ push_ptr(Otos_i);
    // And away we go!
    __ dispatch_next(vtos);
    return;
  }

  // Normal (non-jsr) branch handling

  // Save the current Lbcp
  const Register l_cur_bcp = Lscratch;
  __ mov( Lbcp, l_cur_bcp );

  bool increment_invocation_counter_for_backward_branches = UseCompiler && UseLoopCounter;
  if ( increment_invocation_counter_for_backward_branches ) {
    Label Lforward;
    // check branch direction
    __ br( Assembler::positive, false, Assembler::pn, Lforward );
    // Bump bytecode pointer by displacement (take the branch)
    __ delayed()->add( O1_disp, Lbcp, Lbcp );  // add to bc addr

    const Register G3_method_counters = G3_scratch;
    __ get_method_counters(Lmethod, G3_method_counters, Lforward);

    if (TieredCompilation) {
      Label Lno_mdo, Loverflow;
      int increment = InvocationCounter::count_increment;
      if (ProfileInterpreter) {
        // If no method data exists, go to profile_continue.
        __ ld_ptr(Lmethod, Method::method_data_offset(), G4_scratch);
        __ br_null_short(G4_scratch, Assembler::pn, Lno_mdo);

        // Increment backedge counter in the MDO
        Address mdo_backedge_counter(G4_scratch, in_bytes(MethodData::backedge_counter_offset()) +
                                                 in_bytes(InvocationCounter::counter_offset()));
        Address mask(G4_scratch, in_bytes(MethodData::backedge_mask_offset()));
        __ increment_mask_and_jump(mdo_backedge_counter, increment, mask, G3_scratch, O0,
                                   (UseOnStackReplacement ? Assembler::notZero : Assembler::always), &Lforward);
        __ ba_short(Loverflow);
      }

      // If there's no MDO, increment counter in MethodCounters*
      __ bind(Lno_mdo);
      Address backedge_counter(G3_method_counters,
                               in_bytes(MethodCounters::backedge_counter_offset()) +
                               in_bytes(InvocationCounter::counter_offset()));
      Address mask(G3_method_counters, in_bytes(MethodCounters::backedge_mask_offset()));
      __ increment_mask_and_jump(backedge_counter, increment, mask, G4_scratch, O0,
                                 (UseOnStackReplacement ? Assembler::notZero : Assembler::always), &Lforward);
      __ bind(Loverflow);

      // notify point for loop, pass branch bytecode
      __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::frequency_counter_overflow), l_cur_bcp);

      // Was an OSR adapter generated?
      // O0 = osr nmethod
      __ br_null_short(O0, Assembler::pn, Lforward);

      // Has the nmethod been invalidated already?
      __ ldub(O0, nmethod::state_offset(), O2);
      __ cmp_and_br_short(O2, nmethod::in_use, Assembler::notEqual, Assembler::pn, Lforward);

      // migrate the interpreter frame off of the stack

      __ mov(G2_thread, L7);
      // save nmethod
      __ mov(O0, L6);
      __ set_last_Java_frame(SP, noreg);
      __ call_VM_leaf(noreg, CAST_FROM_FN_PTR(address, SharedRuntime::OSR_migration_begin), L7);
      __ reset_last_Java_frame();
      __ mov(L7, G2_thread);

      // move OSR nmethod to I1
      __ mov(L6, I1);

      // OSR buffer to I0
      __ mov(O0, I0);

      // remove the interpreter frame
      __ restore(I5_savedSP, 0, SP);

      // Jump to the osr code.
      __ ld_ptr(O1, nmethod::osr_entry_point_offset(), O2);
      __ jmp(O2, G0);
      __ delayed()->nop();

    } else { // not TieredCompilation
      // Update Backedge branch separately from invocations
      const Register G4_invoke_ctr = G4;
      __ increment_backedge_counter(G3_method_counters, G4_invoke_ctr, G1_scratch);
      if (ProfileInterpreter) {
        __ test_invocation_counter_for_mdp(G4_invoke_ctr, G3_method_counters, G1_scratch, Lforward);
        if (UseOnStackReplacement) {
          __ test_backedge_count_for_osr(O2_bumped_count, G3_method_counters, l_cur_bcp, G1_scratch);
        }
      } else {
        if (UseOnStackReplacement) {
          __ test_backedge_count_for_osr(G4_invoke_ctr, G3_method_counters, l_cur_bcp, G1_scratch);
        }
      }
    }

    __ bind(Lforward);
  } else
    // Bump bytecode pointer by displacement (take the branch)
    __ add( O1_disp, Lbcp, Lbcp );  // add to bc addr

  // continue with bytecode @ target
  // %%%%% Like Intel, could speed things up by moving bytecode fetch to code above,
  // %%%%% and changing dispatch_next to dispatch_only
  __ dispatch_next(vtos);
}
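
// The conditional-branch templates below compare and then delegate to
// InterpreterMacroAssembler::if_cmp with the condition negated (see ccNot
// above), so the passed condition selects the not-taken path.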

// Note Condition in argument is TemplateTable::Condition
// arg scope is within class scope

void TemplateTable::if_0cmp(Condition cc) {
  // no pointers, integer only!
  transition(itos, vtos);
  // assume branch is more often taken than not (loops use backward branches)
  __ cmp( Otos_i, 0);
  __ if_cmp(ccNot(cc), false);
}


void TemplateTable::if_icmp(Condition cc) {
  transition(itos, vtos);
  __ pop_i(O1);
  __ cmp(O1, Otos_i);
  __ if_cmp(ccNot(cc), false);
}


void TemplateTable::if_nullcmp(Condition cc) {
  transition(atos, vtos);
  __ tst(Otos_i);
  __ if_cmp(ccNot(cc), true);
}


void TemplateTable::if_acmp(Condition cc) {
  transition(atos, vtos);
  __ pop_ptr(O1);
  __ verify_oop(O1);
  __ verify_oop(Otos_i);
  __ cmp(O1, Otos_i);
  __ if_cmp(ccNot(cc), true);
}


void TemplateTable::ret() {
  transition(vtos, vtos);
  locals_index(G3_scratch);
  __ access_local_returnAddress(G3_scratch, Otos_i);
  // Otos_i contains the bci, compute the bcp from that

#ifdef ASSERT
  // jsr result was labeled as an 'itos' not an 'atos' because we cannot GC
  // the result. The return address (really a BCI) was stored with an
  // 'astore' because JVM specs claim it's a pointer-sized thing. Hence in
  // the 64-bit build the 32-bit BCI is actually in the low bits of a 64-bit
  // loaded value.
  { Label zzz;
    __ set(65536, G3_scratch);
    __ cmp(Otos_i, G3_scratch);
    __ bp( Assembler::lessEqualUnsigned, false, Assembler::xcc, Assembler::pn, zzz);
    __ delayed()->nop();
    __ stop("BCI is in the wrong register half?");
    __ bind(zzz);
  }
#endif

  __ profile_ret(vtos, Otos_i, G4_scratch);

  __ ld_ptr(Lmethod, Method::const_offset(), G3_scratch);
  __ add(G3_scratch, Otos_i, G3_scratch);
  __ add(G3_scratch, in_bytes(ConstMethod::codes_offset()), Lbcp);
  __ dispatch_next(vtos);
}


void TemplateTable::wide_ret() {
  transition(vtos, vtos);
  locals_index_wide(G3_scratch);
  __ access_local_returnAddress(G3_scratch, Otos_i);
  // Otos_i contains the bci, compute the bcp from that

  __ profile_ret(vtos, Otos_i, G4_scratch);

  __ ld_ptr(Lmethod, Method::const_offset(), G3_scratch);
  __ add(G3_scratch, Otos_i, G3_scratch);
  __ add(G3_scratch, in_bytes(ConstMethod::codes_offset()), Lbcp);
  __ dispatch_next(vtos);
}


void TemplateTable::tableswitch() {
  transition(itos, vtos);
  Label default_case, continue_execution;

  // align bcp
  __ add(Lbcp, BytesPerInt, O1);
  __ and3(O1, -BytesPerInt, O1);
  // load lo, hi
  __ ld(O1, 1 * BytesPerInt, O2);  // low bound
  __ ld(O1, 2 * BytesPerInt, O3);  // high bound
  // Sign extend the 32 bits
  __ sra ( Otos_i, 0, Otos_i );

  // check against lo & hi
  __ cmp( Otos_i, O2);
  __ br( Assembler::less, false, Assembler::pn, default_case);
  __ delayed()->cmp( Otos_i, O3 );
  __ br( Assembler::greater, false, Assembler::pn, default_case);
  // lookup dispatch offset
  __ delayed()->sub(Otos_i, O2, O2);
  __ profile_switch_case(O2, O3, G3_scratch, G4_scratch);
  __ sll(O2, LogBytesPerInt, O2);
  __ add(O2, 3 * BytesPerInt, O2);
  __ ba(continue_execution);
  __ delayed()->ld(O1, O2, O2);
  // handle default
  __ bind(default_case);
  __ profile_switch_default(O3);
  __ ld(O1, 0, O2);  // get default offset
  // continue execution
  __ bind(continue_execution);
  __ add(Lbcp, O2, Lbcp);
  __ dispatch_next(vtos);
}

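// lookupswitch is never executed directly: the bytecode rewriter replaces
// it with fast_linearswitch (few pairs) or fast_binaryswitch (many pairs).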
void TemplateTable::lookupswitch() {
  transition(itos, itos);
  __ stop("lookupswitch bytecode should have been rewritten");
}

void TemplateTable::fast_linearswitch() {
  transition(itos, vtos);
  Label loop_entry, loop, found, continue_execution;
  // align bcp
  __ add(Lbcp, BytesPerInt, O1);
  __ and3(O1, -BytesPerInt, O1);
  // set counter
  __ ld(O1, BytesPerInt, O2);
  __ sll(O2, LogBytesPerInt + 1, O2);  // in word-pairs
  __ add(O1, 2 * BytesPerInt, O3);     // set first pair addr
  __ ba(loop_entry);
  __ delayed()->add(O3, O2, O2);       // counter now points past last pair

  // table search
  __ bind(loop);
  __ cmp(O4, Otos_i);
  __ br(Assembler::equal, true, Assembler::pn, found);
  __ delayed()->ld(O3, BytesPerInt, O4);  // offset -> O4
  __ inc(O3, 2 * BytesPerInt);

  __ bind(loop_entry);
  __ cmp(O2, O3);
  __ brx(Assembler::greaterUnsigned, true, Assembler::pt, loop);
  __ delayed()->ld(O3, 0, O4);

  // default case
  __ ld(O1, 0, O4);  // get default offset
  if (ProfileInterpreter) {
    __ profile_switch_default(O3);
    __ ba_short(continue_execution);
  }

  // entry found -> get offset
  __ bind(found);
  if (ProfileInterpreter) {
    __ sub(O3, O1, O3);
    __ sub(O3, 2*BytesPerInt, O3);
    __ srl(O3, LogBytesPerInt + 1, O3);  // in word-pairs
    __ profile_switch_case(O3, O1, O2, G3_scratch);

    __ bind(continue_execution);
  }
  __ add(Lbcp, O4, Lbcp);
  __ dispatch_next(vtos);
}


void TemplateTable::fast_binaryswitch() {
  transition(itos, vtos);
  // Implementation using the following core algorithm: (copied from Intel)
  //
  // int binary_search(int key, LookupswitchPair* array, int n) {
  //   // Binary search according to "Methodik des Programmierens" by
  //   // Edsger W. Dijkstra and W.H.J. Feijen, Addison Wesley Germany 1985.
  //   int i = 0;
  //   int j = n;
  //   while (i+1 < j) {
  //     // invariant P: 0 <= i < j <= n and (a[i] <= key < a[j] or Q)
  //     // with      Q: for all i: 0 <= i < n: key < a[i]
  //     // where a stands for the array and assuming that the (nonexistent)
  //     // element a[n] is infinitely big.
  //     int h = (i + j) >> 1;
  //     // i < h < j
  //     if (key < array[h].fast_match()) {
  //       j = h;
  //     } else {
  //       i = h;
  //     }
  //   }
  //   // R: a[i] <= key < a[i+1] or Q
  //   // (i.e., if key is within array, i is the correct index)
  //   return i;
  // }

  // register allocation
  assert(Otos_i == O0, "alias checking");
  const Register Rkey     = Otos_i;  // already set (tosca)
  const Register Rarray   = O1;
  const Register Ri       = O2;
  const Register Rj       = O3;
  const Register Rh       = O4;
  const Register Rscratch = O5;

  const int log_entry_size = 3;
  const int entry_size = 1 << log_entry_size;

  Label found;
  // Find Array start
  __ add(Lbcp, 3 * BytesPerInt, Rarray);
  __ and3(Rarray, -BytesPerInt, Rarray);
  // initialize i & j (in delay slot)
  __ clr( Ri );

  // and start
  Label entry;
  __ ba(entry);
  __ delayed()->ld( Rarray, -BytesPerInt, Rj);
  // (Rj is already in the native byte-ordering.)
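  // (Each LookupswitchPair is two BytesPerInt words, match followed by
  // offset, hence log_entry_size == 3 and entry_size == 8.)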
  // binary search loop
  { Label loop;
    __ bind( loop );
    // int h = (i + j) >> 1;
    __ sra( Rh, 1, Rh );
    // if (key < array[h].fast_match()) {
    //   j = h;
    // } else {
    //   i = h;
    // }
    __ sll( Rh, log_entry_size, Rscratch );
    __ ld( Rarray, Rscratch, Rscratch );
    // (Rscratch is already in the native byte-ordering.)
    __ cmp( Rkey, Rscratch );
    __ movcc( Assembler::less,         false, Assembler::icc, Rh, Rj );  // j = h if (key <  array[h].fast_match())
    __ movcc( Assembler::greaterEqual, false, Assembler::icc, Rh, Ri );  // i = h if (key >= array[h].fast_match())

    // while (i+1 < j)
    __ bind( entry );
    __ add( Ri, 1, Rscratch );
    __ cmp(Rscratch, Rj);
    __ br( Assembler::less, true, Assembler::pt, loop );
    __ delayed()->add( Ri, Rj, Rh ); // delay slot: h = i + j; the loop head shifts it to (i + j) >> 1
  }

  // end of binary search, result index is i (must check again!)
  Label default_case;
  Label continue_execution;
  if (ProfileInterpreter) {
    __ mov( Ri, Rh );           // save index i for profiling
  }
  __ sll( Ri, log_entry_size, Ri );
  __ ld( Rarray, Ri, Rscratch );
  // (Rscratch is already in the native byte-ordering.)
  __ cmp( Rkey, Rscratch );
  __ br( Assembler::notEqual, true, Assembler::pn, default_case );
  __ delayed()->ld( Rarray, -2 * BytesPerInt, Rj ); // load default offset -> j

  // entry found -> j = offset
  __ inc( Ri, BytesPerInt );
  __ profile_switch_case(Rh, Rj, Rscratch, Rkey);
  __ ld( Rarray, Ri, Rj );
  // (Rj is already in the native byte-ordering.)

  if (ProfileInterpreter) {
    __ ba_short(continue_execution);
  }

  __ bind(default_case); // fall through (if not profiling)
  __ profile_switch_default(Ri);

  __ bind(continue_execution);
  __ add( Lbcp, Rj, Lbcp );
  __ dispatch_next( vtos );
}


void TemplateTable::_return(TosState state) {
  transition(state, state);
  assert(_desc->calls_vm(), "inconsistent calls_vm information");

  if (_desc->bytecode() == Bytecodes::_return_register_finalizer) {
    assert(state == vtos, "only valid state");
    __ mov(G0, G3_scratch);
    __ access_local_ptr(G3_scratch, Otos_i);
    __ load_klass(Otos_i, O2);
    __ set(JVM_ACC_HAS_FINALIZER, G3);
    __ ld(O2, in_bytes(Klass::access_flags_offset()), O2);
    __ andcc(G3, O2, G0);
    Label skip_register_finalizer;
    __ br(Assembler::zero, false, Assembler::pn, skip_register_finalizer);
    __ delayed()->nop();

    // Call out to do finalizer registration
    __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::register_finalizer), Otos_i);

    __ bind(skip_register_finalizer);
  }

  // Narrow result if state is itos but result type is smaller.
  // Need to narrow in the return bytecode rather than in generate_return_entry
  // since compiled code callers expect the result to already be narrowed.
  if (state == itos) {
    __ narrow(Otos_i);
  }
  __ remove_activation(state, /* throw_monitor_exception */ true);

  // The caller's SP was adjusted upon method entry to accommodate
  // the callee's non-argument locals.  Undo that adjustment.
  __ ret();                           // return to caller
  __ delayed()->restore(I5_savedSP, G0, SP);
}
// ----------------------------------------------------------------------------
// Volatile variables demand their effects be made known to all CPUs in
// order.  Store buffers on most chips allow reads & writes to reorder; the
// JMM's ReadAfterWrite.java test fails in -Xint mode without some kind of
// memory barrier (i.e., it's not sufficient that the interpreter does not
// reorder volatile references, the hardware also must not reorder them).
//
// According to the new Java Memory Model (JMM):
// (1) All volatiles are serialized with respect to each other.
// ALSO reads & writes act as acquire & release, so:
// (2) A read cannot let unrelated NON-volatile memory refs that happen after
// the read float up to before the read.  It's OK for non-volatile memory refs
// that happen before the volatile read to float down below it.
// (3) Similarly, a volatile write cannot let unrelated NON-volatile memory
// refs that happen BEFORE the write float down to after the write.  It's OK
// for non-volatile memory refs that happen after the volatile write to float
// up before it.
//
// We only put in barriers around volatile refs (they are expensive), not
// _between_ memory refs (that would require us to track the flavor of the
// previous memory refs).  Requirements (2) and (3) require some barriers
// before volatile stores and after volatile loads.  These nearly cover
// requirement (1) but miss the volatile-store-volatile-load case.  This final
// case is placed after volatile-stores although it could just as well go
// before volatile-loads.
void TemplateTable::volatile_barrier(Assembler::Membar_mask_bits order_constraint) {
  // Helper function to insert an is-volatile test and memory barrier
  // All current SPARC implementations run in TSO, needing only StoreLoad
  if ((order_constraint & Assembler::StoreLoad) == 0) return;
  __ membar( order_constraint );
}
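// Editorial illustration (not in the original) of why TSO leaves only the
// volatile-store -> volatile-load case to fix, using a hypothetical pair of
// fields:
//
//   volatile int v;  int plain;
//
//   plain = 1;             // (3): may not sink below the volatile store
//   v     = 2;             // volatile store
//   membar(StoreLoad);     // the one ordering TSO does not give for free
//   r     = v;             // volatile load
//   s     = plain;         // (2): may not float above the volatile load
//
// TSO already forbids load-load, load-store and store-store reordering, so
// volatile_barrier() above emits a membar only when StoreLoad is requested.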
// ----------------------------------------------------------------------------
void TemplateTable::resolve_cache_and_index(int byte_no,
                                            Register Rcache,
                                            Register index,
                                            size_t index_size) {
  // Depends on cpCacheOop layout!

  Label resolved;
  Bytecodes::Code code = bytecode();
  switch (code) {
  case Bytecodes::_nofast_getfield: code = Bytecodes::_getfield; break;
  case Bytecodes::_nofast_putfield: code = Bytecodes::_putfield; break;
  default: break;
  }

  assert(byte_no == f1_byte || byte_no == f2_byte, "byte_no out of range");
  __ get_cache_and_index_and_bytecode_at_bcp(Rcache, index, Lbyte_code, byte_no, 1, index_size);
  __ cmp(Lbyte_code, code);  // have we resolved this bytecode?
  __ br(Assembler::equal, false, Assembler::pt, resolved);
  __ delayed()->set(code, O1);

  address entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_from_cache);
  // first time invocation - must resolve first
  __ call_VM(noreg, entry, O1);
  // Update registers with resolved info
  __ get_cache_and_index_at_bcp(Rcache, index, 1, index_size);
  __ bind(resolved);
}

void TemplateTable::load_invoke_cp_cache_entry(int byte_no,
                                               Register method,
                                               Register itable_index,
                                               Register flags,
                                               bool is_invokevirtual,
                                               bool is_invokevfinal,
                                               bool is_invokedynamic) {
  // Uses both G3_scratch and G4_scratch
  Register cache = G3_scratch;
  Register index = G4_scratch;
  assert_different_registers(cache, method, itable_index);

  // determine constant pool cache field offsets
  assert(is_invokevirtual == (byte_no == f2_byte), "is_invokevirtual flag redundant");
  const int method_offset = in_bytes(
      ConstantPoolCache::base_offset() +
      ((byte_no == f2_byte)
       ? ConstantPoolCacheEntry::f2_offset()
       : ConstantPoolCacheEntry::f1_offset()
      )
    );
  const int flags_offset = in_bytes(ConstantPoolCache::base_offset() +
                                    ConstantPoolCacheEntry::flags_offset());
  // access constant pool cache fields
  const int index_offset = in_bytes(ConstantPoolCache::base_offset() +
                                    ConstantPoolCacheEntry::f2_offset());

  if (is_invokevfinal) {
    __ get_cache_and_index_at_bcp(cache, index, 1);
    __ ld_ptr(Address(cache, method_offset), method);
  } else {
    size_t index_size = (is_invokedynamic ? sizeof(u4) : sizeof(u2));
    resolve_cache_and_index(byte_no, cache, index, index_size);
    __ ld_ptr(Address(cache, method_offset), method);
  }

  if (itable_index != noreg) {
    // pick up itable or appendix index from f2 also:
    __ ld_ptr(Address(cache, index_offset), itable_index);
  }
  __ ld_ptr(Address(cache, flags_offset), flags);
}

// The Rcache register must be set before the call
void TemplateTable::load_field_cp_cache_entry(Register Robj,
                                              Register Rcache,
                                              Register index,
                                              Register Roffset,
                                              Register Rflags,
                                              bool is_static) {
  assert_different_registers(Rcache, Rflags, Roffset);

  ByteSize cp_base_offset = ConstantPoolCache::base_offset();

  __ ld_ptr(Rcache, cp_base_offset + ConstantPoolCacheEntry::flags_offset(), Rflags);
  __ ld_ptr(Rcache, cp_base_offset + ConstantPoolCacheEntry::f2_offset(), Roffset);
  if (is_static) {
    __ ld_ptr(Rcache, cp_base_offset + ConstantPoolCacheEntry::f1_offset(), Robj);
    const int mirror_offset = in_bytes(Klass::java_mirror_offset());
    __ ld_ptr( Robj, mirror_offset, Robj);
  }
}
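// Editorial sketch (assumed, not in the original): the offsets used above
// follow the standard HotSpot ConstantPoolCacheEntry layout, roughly
//
//   struct ConstantPoolCacheEntry {    // one pointer-sized word each
//     intptr_t _indices;  // cp index plus the already-resolved bytecodes
//     intptr_t _f1;       // e.g. Method*, Klass*, resolved-references index
//     intptr_t _f2;       // e.g. vtable index, field offset, final Method*
//     intptr_t _flags;    // tos_state, parameter size, is_volatile, is_vfinal, ...
//   };
//
// which is why field code reads the offset from f2 and the static mirror
// through f1, while invoke code picks f1 or f2 based on byte_no.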
// The registers Rcache and index are expected to be set before the call.
// Correct values of the Rcache and index registers are preserved.
void TemplateTable::jvmti_post_field_access(Register Rcache,
                                            Register index,
                                            bool is_static,
                                            bool has_tos) {
  ByteSize cp_base_offset = ConstantPoolCache::base_offset();

  if (JvmtiExport::can_post_field_access()) {
    // Check to see if a field access watch has been set before we take
    // the time to call into the VM.
    Label Label1;
    assert_different_registers(Rcache, index, G1_scratch);
    AddressLiteral get_field_access_count_addr(JvmtiExport::get_field_access_count_addr());
    __ load_contents(get_field_access_count_addr, G1_scratch);
    __ cmp_and_br_short(G1_scratch, 0, Assembler::equal, Assembler::pt, Label1);

    __ add(Rcache, in_bytes(cp_base_offset), Rcache);

    if (is_static) {
      __ clr(Otos_i);
    } else {
      if (has_tos) {
        // save object pointer before call_VM() clobbers it
        __ push_ptr(Otos_i);  // put object on tos where GC wants it.
      } else {
        // Load top of stack (do not pop the value off the stack)
        __ ld_ptr(Lesp, Interpreter::expr_offset_in_bytes(0), Otos_i);
      }
      __ verify_oop(Otos_i);
    }
    // Otos_i: object pointer or NULL if static
    // Rcache: cache entry pointer
    __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_field_access),
               Otos_i, Rcache);
    if (!is_static && has_tos) {
      __ pop_ptr(Otos_i);  // restore object pointer
      __ verify_oop(Otos_i);
    }
    __ get_cache_and_index_at_bcp(Rcache, index, 1);
    __ bind(Label1);
  }
}

void TemplateTable::getfield_or_static(int byte_no, bool is_static, RewriteControl rc) {
  transition(vtos, vtos);

  Register Rcache = G3_scratch;
  Register index  = G4_scratch;
  Register Rclass = Rcache;
  Register Roffset= G4_scratch;
  Register Rflags = G1_scratch;
  ByteSize cp_base_offset = ConstantPoolCache::base_offset();

  resolve_cache_and_index(byte_no, Rcache, index, sizeof(u2));
  jvmti_post_field_access(Rcache, index, is_static, false);
  load_field_cp_cache_entry(Rclass, Rcache, index, Roffset, Rflags, is_static);

  if (!is_static) {
    pop_and_check_object(Rclass);
  } else {
    __ verify_oop(Rclass);
  }

  Label exit;

  Assembler::Membar_mask_bits membar_bits =
    Assembler::Membar_mask_bits(Assembler::LoadLoad | Assembler::LoadStore);

  if (__ membar_has_effect(membar_bits)) {
    // Get volatile flag
    __ set((1 << ConstantPoolCacheEntry::is_volatile_shift), Lscratch);
    __ and3(Rflags, Lscratch, Lscratch);
  }

  Label checkVolatile;

  // compute field type
  Label notByte, notBool, notInt, notShort, notChar, notLong, notFloat, notObj;
  __ srl(Rflags, ConstantPoolCacheEntry::tos_state_shift, Rflags);
  // Make sure we don't need to mask Rflags after the above shift
  ConstantPoolCacheEntry::verify_tos_state_shift();

  // Check atos before itos for getstatic, more likely (in Queens at least)
  __ cmp(Rflags, atos);
  __ br(Assembler::notEqual, false, Assembler::pt, notObj);
  __ delayed()->cmp(Rflags, itos);

  // atos
  __ load_heap_oop(Rclass, Roffset, Otos_i);
  __ verify_oop(Otos_i);
  __ push(atos);
  if (!is_static && rc == may_rewrite) {
    patch_bytecode(Bytecodes::_fast_agetfield, G3_scratch, G4_scratch);
  }
  __ ba(checkVolatile);
  __ delayed()->tst(Lscratch);

  __ bind(notObj);

  // cmp(Rflags, itos);
  __ br(Assembler::notEqual, false, Assembler::pt, notInt);
  __ delayed()->cmp(Rflags, ltos);

  // itos
  __ ld(Rclass, Roffset, Otos_i);
  __ push(itos);
  if (!is_static && rc == may_rewrite) {
    patch_bytecode(Bytecodes::_fast_igetfield, G3_scratch, G4_scratch);
  }
  __ ba(checkVolatile);
  __ delayed()->tst(Lscratch);

  __ bind(notInt);
  // cmp(Rflags, ltos);
  __ br(Assembler::notEqual, false, Assembler::pt, notLong);
  __ delayed()->cmp(Rflags, btos);

  // ltos
  // load must be atomic
  __ ld_long(Rclass, Roffset, Otos_l);
  __ push(ltos);
  if (!is_static && rc == may_rewrite) {
    patch_bytecode(Bytecodes::_fast_lgetfield, G3_scratch, G4_scratch);
  }
  __ ba(checkVolatile);
  __ delayed()->tst(Lscratch);

  __ bind(notLong);

  // cmp(Rflags, btos);
  __ br(Assembler::notEqual, false, Assembler::pt, notByte);
  __ delayed()->cmp(Rflags, ztos);

  // btos
  __ ldsb(Rclass, Roffset, Otos_i);
  __ push(itos);
  if (!is_static && rc == may_rewrite) {
    patch_bytecode(Bytecodes::_fast_bgetfield, G3_scratch, G4_scratch);
  }
  __ ba(checkVolatile);
  __ delayed()->tst(Lscratch);

  __ bind(notByte);

  // cmp(Rflags, ztos);
  __ br(Assembler::notEqual, false, Assembler::pt, notBool);
  __ delayed()->cmp(Rflags, ctos);

  // ztos
  __ ldsb(Rclass, Roffset, Otos_i);
  __ push(itos);
  if (!is_static && rc == may_rewrite) {
    // use btos rewriting, no truncation to t/f bit is needed for getfield.
    patch_bytecode(Bytecodes::_fast_bgetfield, G3_scratch, G4_scratch);
  }
  __ ba(checkVolatile);
  __ delayed()->tst(Lscratch);

  __ bind(notBool);

  // cmp(Rflags, ctos);
  __ br(Assembler::notEqual, false, Assembler::pt, notChar);
  __ delayed()->cmp(Rflags, stos);

  // ctos
  __ lduh(Rclass, Roffset, Otos_i);
  __ push(itos);
  if (!is_static && rc == may_rewrite) {
    patch_bytecode(Bytecodes::_fast_cgetfield, G3_scratch, G4_scratch);
  }
  __ ba(checkVolatile);
  __ delayed()->tst(Lscratch);

  __ bind(notChar);

  // cmp(Rflags, stos);
  __ br(Assembler::notEqual, false, Assembler::pt, notShort);
  __ delayed()->cmp(Rflags, ftos);

  // stos
  __ ldsh(Rclass, Roffset, Otos_i);
  __ push(itos);
  if (!is_static && rc == may_rewrite) {
    patch_bytecode(Bytecodes::_fast_sgetfield, G3_scratch, G4_scratch);
  }
  __ ba(checkVolatile);
  __ delayed()->tst(Lscratch);

  __ bind(notShort);


  // cmp(Rflags, ftos);
  __ br(Assembler::notEqual, false, Assembler::pt, notFloat);
  __ delayed()->tst(Lscratch);

  // ftos
  __ ldf(FloatRegisterImpl::S, Rclass, Roffset, Ftos_f);
  __ push(ftos);
  if (!is_static && rc == may_rewrite) {
    patch_bytecode(Bytecodes::_fast_fgetfield, G3_scratch, G4_scratch);
  }
  __ ba(checkVolatile);
  __ delayed()->tst(Lscratch);

  __ bind(notFloat);


  // dtos
  __ ldf(FloatRegisterImpl::D, Rclass, Roffset, Ftos_d);
  __ push(dtos);
  if (!is_static && rc == may_rewrite) {
    patch_bytecode(Bytecodes::_fast_dgetfield, G3_scratch, G4_scratch);
  }

  __ bind(checkVolatile);
  if (__ membar_has_effect(membar_bits)) {
    // __ tst(Lscratch); executed in delay slot
    __ br(Assembler::zero, false, Assembler::pt, exit);
    __ delayed()->nop();
    volatile_barrier(membar_bits);
  }

  __ bind(exit);
}

void TemplateTable::getfield(int byte_no) {
  getfield_or_static(byte_no, false);
}

void TemplateTable::nofast_getfield(int byte_no) {
  getfield_or_static(byte_no, false, may_not_rewrite);
}

void TemplateTable::getstatic(int byte_no) {
  getfield_or_static(byte_no, true);
}
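// Editorial note (not in the original): the _fast_Xgetfield / _fast_Xputfield
// templates below are only reached through the patch_bytecode rewrites above,
// so they may assume the constant pool cache entry is already resolved and
// read the field offset straight from f2.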
void TemplateTable::fast_accessfield(TosState state) {
  transition(atos, state);
  Register Rcache  = G3_scratch;
  Register index   = G4_scratch;
  Register Roffset = G4_scratch;
  Register Rflags  = Rcache;
  ByteSize cp_base_offset = ConstantPoolCache::base_offset();

  __ get_cache_and_index_at_bcp(Rcache, index, 1);
  jvmti_post_field_access(Rcache, index, /*is_static*/false, /*has_tos*/true);

  __ ld_ptr(Rcache, cp_base_offset + ConstantPoolCacheEntry::f2_offset(), Roffset);

  __ null_check(Otos_i);
  __ verify_oop(Otos_i);

  Label exit;

  Assembler::Membar_mask_bits membar_bits =
    Assembler::Membar_mask_bits(Assembler::LoadLoad | Assembler::LoadStore);
  if (__ membar_has_effect(membar_bits)) {
    // Get volatile flag (the original loaded f2 again here, but the volatile
    // bit lives in the flags word, so load flags_offset)
    __ ld_ptr(Rcache, cp_base_offset + ConstantPoolCacheEntry::flags_offset(), Rflags);
    __ set((1 << ConstantPoolCacheEntry::is_volatile_shift), Lscratch);
  }

  switch (bytecode()) {
    case Bytecodes::_fast_bgetfield:
      __ ldsb(Otos_i, Roffset, Otos_i);
      break;
    case Bytecodes::_fast_cgetfield:
      __ lduh(Otos_i, Roffset, Otos_i);
      break;
    case Bytecodes::_fast_sgetfield:
      __ ldsh(Otos_i, Roffset, Otos_i);
      break;
    case Bytecodes::_fast_igetfield:
      __ ld(Otos_i, Roffset, Otos_i);
      break;
    case Bytecodes::_fast_lgetfield:
      __ ld_long(Otos_i, Roffset, Otos_l);
      break;
    case Bytecodes::_fast_fgetfield:
      __ ldf(FloatRegisterImpl::S, Otos_i, Roffset, Ftos_f);
      break;
    case Bytecodes::_fast_dgetfield:
      __ ldf(FloatRegisterImpl::D, Otos_i, Roffset, Ftos_d);
      break;
    case Bytecodes::_fast_agetfield:
      __ load_heap_oop(Otos_i, Roffset, Otos_i);
      break;
    default:
      ShouldNotReachHere();
  }

  if (__ membar_has_effect(membar_bits)) {
    __ btst(Lscratch, Rflags);
    __ br(Assembler::zero, false, Assembler::pt, exit);
    __ delayed()->nop();
    volatile_barrier(membar_bits);
    __ bind(exit);
  }

  if (state == atos) {
    __ verify_oop(Otos_i);    // does not blow flags!
  }
}

void TemplateTable::jvmti_post_fast_field_mod() {
  if (JvmtiExport::can_post_field_modification()) {
    // Check to see if a field modification watch has been set before we take
    // the time to call into the VM.
    Label done;
    AddressLiteral get_field_modification_count_addr(JvmtiExport::get_field_modification_count_addr());
    __ load_contents(get_field_modification_count_addr, G4_scratch);
    __ cmp_and_br_short(G4_scratch, 0, Assembler::equal, Assembler::pt, done);
    __ pop_ptr(G4_scratch);     // copy the object pointer from tos
    __ verify_oop(G4_scratch);
    __ push_ptr(G4_scratch);    // put the object pointer back on tos
    __ get_cache_entry_pointer_at_bcp(G1_scratch, G3_scratch, 1);
    // Save tos values before call_VM() clobbers them.  Since we have
    // to do it for every data type, we use the saved values as the
    // jvalue object.
    switch (bytecode()) {             // save tos values before call_VM() clobbers them
    case Bytecodes::_fast_aputfield: __ push_ptr(Otos_i); break;
    case Bytecodes::_fast_bputfield: // fall through
    case Bytecodes::_fast_zputfield: // fall through
    case Bytecodes::_fast_sputfield: // fall through
    case Bytecodes::_fast_cputfield: // fall through
    case Bytecodes::_fast_iputfield: __ push_i(Otos_i); break;
    case Bytecodes::_fast_dputfield: __ push_d(Ftos_d); break;
    case Bytecodes::_fast_fputfield: __ push_f(Ftos_f); break;
    // get words in right order for use as jvalue object
    case Bytecodes::_fast_lputfield: __ push_l(Otos_l); break;
    default: break;
    }
    // setup pointer to jvalue object
    __ mov(Lesp, G3_scratch);  __ inc(G3_scratch, wordSize);
    // G4_scratch:  object pointer
    // G1_scratch:  cache entry pointer
    // G3_scratch:  jvalue object on the stack
    __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_field_modification), G4_scratch, G1_scratch, G3_scratch);
    switch (bytecode()) {             // restore tos values
    case Bytecodes::_fast_aputfield: __ pop_ptr(Otos_i); break;
    case Bytecodes::_fast_bputfield: // fall through
    case Bytecodes::_fast_zputfield: // fall through
    case Bytecodes::_fast_sputfield: // fall through
    case Bytecodes::_fast_cputfield: // fall through
    case Bytecodes::_fast_iputfield: __ pop_i(Otos_i); break;
    case Bytecodes::_fast_dputfield: __ pop_d(Ftos_d); break;
    case Bytecodes::_fast_fputfield: __ pop_f(Ftos_f); break;
    case Bytecodes::_fast_lputfield: __ pop_l(Otos_l); break;
    default: break;
    }
    __ bind(done);
  }
}

// The registers Rcache and index are expected to be set before the call.
// The function may destroy various registers, just not the Rcache and index registers.
void TemplateTable::jvmti_post_field_mod(Register Rcache, Register index, bool is_static) {
  ByteSize cp_base_offset = ConstantPoolCache::base_offset();

  if (JvmtiExport::can_post_field_modification()) {
    // Check to see if a field modification watch has been set before we take
    // the time to call into the VM.
    Label Label1;
    assert_different_registers(Rcache, index, G1_scratch);
    AddressLiteral get_field_modification_count_addr(JvmtiExport::get_field_modification_count_addr());
    __ load_contents(get_field_modification_count_addr, G1_scratch);
    __ cmp_and_br_short(G1_scratch, 0, Assembler::zero, Assembler::pt, Label1);

    // The Rcache and index registers have already been set.
    // This would allow this call to be eliminated, but the Rcache and index
    // registers would then have to be used consistently after this line.
    __ get_cache_and_index_at_bcp(G1_scratch, G4_scratch, 1);

    __ add(G1_scratch, in_bytes(cp_base_offset), G3_scratch);
    if (is_static) {
      // Life is simple.  Null out the object pointer.
      __ clr(G4_scratch);
    } else {
      Register Rflags = G1_scratch;
      // Life is harder.  The stack holds the value on top, followed by the
      // object.  We don't know the size of the value, though; it could be
      // one or two words depending on its type.  As a result, we must find
      // the type to determine where the object is.
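      // Editorial stack picture (not in the original) just before the VM
      // call, shown for a two-word (long/double) value:
      //
      //   Lesp --> [ value, word 1 ]
      //            [ value, word 0 ]
      //            [ objectref     ]   <- what G4_scratch must point at
      //
      // hence the expr_offset_in_bytes(2) adjustment below for two-word
      // values versus expr_offset_in_bytes(1) for one-word values.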
      Label two_word, valsizeknown;
      __ ld_ptr(G1_scratch, cp_base_offset + ConstantPoolCacheEntry::flags_offset(), Rflags);
      __ mov(Lesp, G4_scratch);
      __ srl(Rflags, ConstantPoolCacheEntry::tos_state_shift, Rflags);
      // Make sure we don't need to mask Rflags after the above shift
      ConstantPoolCacheEntry::verify_tos_state_shift();
      __ cmp(Rflags, ltos);
      __ br(Assembler::equal, false, Assembler::pt, two_word);
      __ delayed()->cmp(Rflags, dtos);
      __ br(Assembler::equal, false, Assembler::pt, two_word);
      __ delayed()->nop();
      __ inc(G4_scratch, Interpreter::expr_offset_in_bytes(1));
      __ ba_short(valsizeknown);
      __ bind(two_word);

      __ inc(G4_scratch, Interpreter::expr_offset_in_bytes(2));

      __ bind(valsizeknown);
      // setup object pointer
      __ ld_ptr(G4_scratch, 0, G4_scratch);
      __ verify_oop(G4_scratch);
    }
    // setup pointer to jvalue object
    __ mov(Lesp, G1_scratch);  __ inc(G1_scratch, wordSize);
    // G4_scratch:  object pointer or NULL if static
    // G3_scratch:  cache entry pointer
    // G1_scratch:  jvalue object on the stack
    __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_field_modification),
               G4_scratch, G3_scratch, G1_scratch);
    __ get_cache_and_index_at_bcp(Rcache, index, 1);
    __ bind(Label1);
  }
}

void TemplateTable::pop_and_check_object(Register r) {
  __ pop_ptr(r);
  __ null_check(r);  // for field access must check obj.
  __ verify_oop(r);
}

void TemplateTable::putfield_or_static(int byte_no, bool is_static, RewriteControl rc) {
  transition(vtos, vtos);
  Register Rcache = G3_scratch;
  Register index  = G4_scratch;
  Register Rclass = Rcache;
  Register Roffset= G4_scratch;
  Register Rflags = G1_scratch;
  ByteSize cp_base_offset = ConstantPoolCache::base_offset();

  resolve_cache_and_index(byte_no, Rcache, index, sizeof(u2));
  jvmti_post_field_mod(Rcache, index, is_static);
  load_field_cp_cache_entry(Rclass, Rcache, index, Roffset, Rflags, is_static);

  Assembler::Membar_mask_bits read_bits =
    Assembler::Membar_mask_bits(Assembler::LoadStore | Assembler::StoreStore);
  Assembler::Membar_mask_bits write_bits = Assembler::StoreLoad;

  Label notVolatile, checkVolatile, exit;
  if (__ membar_has_effect(read_bits) || __ membar_has_effect(write_bits)) {
    __ set((1 << ConstantPoolCacheEntry::is_volatile_shift), Lscratch);
    __ and3(Rflags, Lscratch, Lscratch);

    if (__ membar_has_effect(read_bits)) {
      __ cmp_and_br_short(Lscratch, 0, Assembler::equal, Assembler::pt, notVolatile);
      volatile_barrier(read_bits);
      __ bind(notVolatile);
    }
  }

  __ srl(Rflags, ConstantPoolCacheEntry::tos_state_shift, Rflags);
  // Make sure we don't need to mask Rflags after the above shift
  ConstantPoolCacheEntry::verify_tos_state_shift();

  // compute field type
  Label notInt, notShort, notChar, notObj, notByte, notBool, notLong, notFloat;

  if (is_static) {
    // putstatic with object type most likely, check that first
    __ cmp(Rflags, atos);
    __ br(Assembler::notEqual, false, Assembler::pt, notObj);
    __ delayed()->cmp(Rflags, itos);

    // atos
    {
      __ pop_ptr();
      __ verify_oop(Otos_i);
      do_oop_store(_masm, Rclass, Roffset, 0, Otos_i, G1_scratch, ACCESS_ON_HEAP);
      __ ba(checkVolatile);
      __ delayed()->tst(Lscratch);
    }

    __ bind(notObj);
    // cmp(Rflags, itos);
    __ br(Assembler::notEqual, false, Assembler::pt, notInt);
    __ delayed()->cmp(Rflags, btos);

    // itos
    {
      __ pop_i();
      __ st(Otos_i, Rclass, Roffset);
      __ ba(checkVolatile);
      __ delayed()->tst(Lscratch);
    }

    __ bind(notInt);
  } else {
    // putfield with int type most likely, check that first
    __ cmp(Rflags, itos);
    __ br(Assembler::notEqual, false, Assembler::pt, notInt);
    __ delayed()->cmp(Rflags, atos);

    // itos
    {
      __ pop_i();
      pop_and_check_object(Rclass);
      __ st(Otos_i, Rclass, Roffset);
      if (rc == may_rewrite) patch_bytecode(Bytecodes::_fast_iputfield, G3_scratch, G4_scratch, true, byte_no);
      __ ba(checkVolatile);
      __ delayed()->tst(Lscratch);
    }

    __ bind(notInt);
    // cmp(Rflags, atos);
    __ br(Assembler::notEqual, false, Assembler::pt, notObj);
    __ delayed()->cmp(Rflags, btos);

    // atos
    {
      __ pop_ptr();
      pop_and_check_object(Rclass);
      __ verify_oop(Otos_i);
      do_oop_store(_masm, Rclass, Roffset, 0, Otos_i, G1_scratch, ACCESS_ON_HEAP);
      if (rc == may_rewrite) patch_bytecode(Bytecodes::_fast_aputfield, G3_scratch, G4_scratch, true, byte_no);
      __ ba(checkVolatile);
      __ delayed()->tst(Lscratch);
    }

    __ bind(notObj);
  }

  // cmp(Rflags, btos);
  __ br(Assembler::notEqual, false, Assembler::pt, notByte);
  __ delayed()->cmp(Rflags, ztos);

  // btos
  {
    __ pop_i();
    if (!is_static) pop_and_check_object(Rclass);
    __ stb(Otos_i, Rclass, Roffset);
    if (!is_static && rc == may_rewrite) {
      patch_bytecode(Bytecodes::_fast_bputfield, G3_scratch, G4_scratch, true, byte_no);
    }
    __ ba(checkVolatile);
    __ delayed()->tst(Lscratch);
  }

  __ bind(notByte);

  // cmp(Rflags, ztos);
  __ br(Assembler::notEqual, false, Assembler::pt, notBool);
  __ delayed()->cmp(Rflags, ltos);

  // ztos
  {
    __ pop_i();
    if (!is_static) pop_and_check_object(Rclass);
    __ and3(Otos_i, 1, Otos_i);
    __ stb(Otos_i, Rclass, Roffset);
    if (!is_static && rc == may_rewrite) {
      patch_bytecode(Bytecodes::_fast_zputfield, G3_scratch, G4_scratch, true, byte_no);
    }
    __ ba(checkVolatile);
    __ delayed()->tst(Lscratch);
  }

  __ bind(notBool);
  // cmp(Rflags, ltos);
  __ br(Assembler::notEqual, false, Assembler::pt, notLong);
  __ delayed()->cmp(Rflags, ctos);

  // ltos
  {
    __ pop_l();
    if (!is_static) pop_and_check_object(Rclass);
    __ st_long(Otos_l, Rclass, Roffset);
    if (!is_static && rc == may_rewrite) {
      patch_bytecode(Bytecodes::_fast_lputfield, G3_scratch, G4_scratch, true, byte_no);
    }
    __ ba(checkVolatile);
    __ delayed()->tst(Lscratch);
  }

  __ bind(notLong);
  // cmp(Rflags, ctos);
  __ br(Assembler::notEqual, false, Assembler::pt, notChar);
  __ delayed()->cmp(Rflags, stos);

  // ctos (char)
  {
    __ pop_i();
    if (!is_static) pop_and_check_object(Rclass);
    __ sth(Otos_i, Rclass, Roffset);
    if (!is_static && rc == may_rewrite) {
      patch_bytecode(Bytecodes::_fast_cputfield, G3_scratch, G4_scratch, true, byte_no);
    }
    __ ba(checkVolatile);
    __ delayed()->tst(Lscratch);
  }

  __ bind(notChar);
  // cmp(Rflags, stos);
  __ br(Assembler::notEqual, false, Assembler::pt, notShort);
  __ delayed()->cmp(Rflags, ftos);
  // stos (short)
  {
    __ pop_i();
    if (!is_static) pop_and_check_object(Rclass);
    __ sth(Otos_i, Rclass, Roffset);
    if (!is_static && rc == may_rewrite) {
      patch_bytecode(Bytecodes::_fast_sputfield, G3_scratch, G4_scratch, true, byte_no);
    }
    __ ba(checkVolatile);
    __ delayed()->tst(Lscratch);
  }

  __ bind(notShort);
  // cmp(Rflags, ftos);
  __ br(Assembler::notZero, false, Assembler::pt, notFloat);
  __ delayed()->nop();

  // ftos
  {
    __ pop_f();
    if (!is_static) pop_and_check_object(Rclass);
    __ stf(FloatRegisterImpl::S, Ftos_f, Rclass, Roffset);
    if (!is_static && rc == may_rewrite) {
      patch_bytecode(Bytecodes::_fast_fputfield, G3_scratch, G4_scratch, true, byte_no);
    }
    __ ba(checkVolatile);
    __ delayed()->tst(Lscratch);
  }

  __ bind(notFloat);

  // dtos
  {
    __ pop_d();
    if (!is_static) pop_and_check_object(Rclass);
    __ stf(FloatRegisterImpl::D, Ftos_d, Rclass, Roffset);
    if (!is_static && rc == may_rewrite) {
      patch_bytecode(Bytecodes::_fast_dputfield, G3_scratch, G4_scratch, true, byte_no);
    }
  }

  __ bind(checkVolatile);
  __ tst(Lscratch);

  if (__ membar_has_effect(write_bits)) {
    // __ tst(Lscratch); in delay slot
    __ br(Assembler::zero, false, Assembler::pt, exit);
    __ delayed()->nop();
    volatile_barrier(Assembler::StoreLoad);
    __ bind(exit);
  }
}

void TemplateTable::fast_storefield(TosState state) {
  transition(state, vtos);
  Register Rcache = G3_scratch;
  Register Rclass = Rcache;
  Register Roffset= G4_scratch;
  Register Rflags = G1_scratch;
  ByteSize cp_base_offset = ConstantPoolCache::base_offset();

  jvmti_post_fast_field_mod();

  __ get_cache_and_index_at_bcp(Rcache, G4_scratch, 1);

  Assembler::Membar_mask_bits read_bits =
    Assembler::Membar_mask_bits(Assembler::LoadStore | Assembler::StoreStore);
  Assembler::Membar_mask_bits write_bits = Assembler::StoreLoad;

  Label notVolatile, checkVolatile, exit;
  if (__ membar_has_effect(read_bits) || __ membar_has_effect(write_bits)) {
    __ ld_ptr(Rcache, cp_base_offset + ConstantPoolCacheEntry::flags_offset(), Rflags);
    __ set((1 << ConstantPoolCacheEntry::is_volatile_shift), Lscratch);
    __ and3(Rflags, Lscratch, Lscratch);
    if (__ membar_has_effect(read_bits)) {
      __ cmp_and_br_short(Lscratch, 0, Assembler::equal, Assembler::pt, notVolatile);
      volatile_barrier(read_bits);
      __ bind(notVolatile);
    }
  }

  __ ld_ptr(Rcache, cp_base_offset + ConstantPoolCacheEntry::f2_offset(), Roffset);
  pop_and_check_object(Rclass);

  switch (bytecode()) {
  case Bytecodes::_fast_zputfield: __ and3(Otos_i, 1, Otos_i);  // fall through to bputfield
  case Bytecodes::_fast_bputfield: __ stb(Otos_i, Rclass, Roffset); break;
  case Bytecodes::_fast_cputfield: /* fall through */
  case Bytecodes::_fast_sputfield: __ sth(Otos_i, Rclass, Roffset); break;
  case Bytecodes::_fast_iputfield: __ st(Otos_i, Rclass, Roffset); break;
  case Bytecodes::_fast_lputfield: __ st_long(Otos_l, Rclass, Roffset); break;
  case Bytecodes::_fast_fputfield:
    __ stf(FloatRegisterImpl::S, Ftos_f, Rclass, Roffset);
    break;
  case Bytecodes::_fast_dputfield:
    __ stf(FloatRegisterImpl::D, Ftos_d, Rclass, Roffset);
    break;
  case Bytecodes::_fast_aputfield:
    do_oop_store(_masm, Rclass, Roffset, 0, Otos_i, G1_scratch, ACCESS_ON_HEAP);
    break;
  default:
    ShouldNotReachHere();
  }

  if (__ membar_has_effect(write_bits)) {
    __ cmp_and_br_short(Lscratch, 0, Assembler::equal, Assembler::pt, exit);
    volatile_barrier(Assembler::StoreLoad);
    __ bind(exit);
  }
}

void TemplateTable::putfield(int byte_no) {
  putfield_or_static(byte_no, false);
}

void TemplateTable::nofast_putfield(int byte_no) {
  putfield_or_static(byte_no, false, may_not_rewrite);
}

void TemplateTable::putstatic(int byte_no) {
  putfield_or_static(byte_no, true);
}

void TemplateTable::fast_xaccess(TosState state) {
  transition(vtos, state);
  Register Rcache    = G3_scratch;
  Register Roffset   = G4_scratch;
  Register Rflags    = G4_scratch;
  Register Rreceiver = Lscratch;

  __ ld_ptr(Llocals, 0, Rreceiver);

  // access constant pool cache  (is resolved)
  __ get_cache_and_index_at_bcp(Rcache, G4_scratch, 2);
  __ ld_ptr(Rcache, ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::f2_offset(), Roffset);
  __ add(Lbcp, 1, Lbcp);  // needed to report exception at the correct bcp

  __ verify_oop(Rreceiver);
  __ null_check(Rreceiver);
  if (state == atos) {
    __ load_heap_oop(Rreceiver, Roffset, Otos_i);
  } else if (state == itos) {
    __ ld(Rreceiver, Roffset, Otos_i);
  } else if (state == ftos) {
    __ ldf(FloatRegisterImpl::S, Rreceiver, Roffset, Ftos_f);
  } else {
    ShouldNotReachHere();
  }

  Assembler::Membar_mask_bits membar_bits =
    Assembler::Membar_mask_bits(Assembler::LoadLoad | Assembler::LoadStore);
  if (__ membar_has_effect(membar_bits)) {

    // Get is_volatile value in Rflags and check if membar is needed
    __ ld_ptr(Rcache, ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::flags_offset(), Rflags);

    // Test volatile
    Label notVolatile;
    __ set((1 << ConstantPoolCacheEntry::is_volatile_shift), Lscratch);
    __ btst(Rflags, Lscratch);
    __ br(Assembler::zero, false, Assembler::pt, notVolatile);
    __ delayed()->nop();
    volatile_barrier(membar_bits);
    __ bind(notVolatile);
  }

  __ interp_verify_oop(Otos_i, state, __FILE__, __LINE__);
  __ sub(Lbcp, 1, Lbcp);
}
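// Editorial note (not in the original): fast_xaccess implements the fused
// _fast_Xaccess_0 bytecodes (aload_0 followed by a fast getfield), which is
// why the cp-cache index above is fetched from bcp + 2 and why Lbcp is bumped
// by 1 around the access, so a NullPointerException is reported at the
// embedded getfield rather than at the aload_0.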
//----------------------------------------------------------------------------------------------------
// Calls

void TemplateTable::count_calls(Register method, Register temp) {
  // implemented elsewhere
  ShouldNotReachHere();
}

void TemplateTable::prepare_invoke(int byte_no,
                                   Register method,  // linked method (or i-klass)
                                   Register ra,      // return address
                                   Register index,   // itable index, MethodType, etc.
                                   Register recv,    // if caller wants to see it
                                   Register flags    // if caller wants to test it
                                   ) {
  // determine flags
  const Bytecodes::Code code = bytecode();
  const bool is_invokeinterface = code == Bytecodes::_invokeinterface;
  const bool is_invokedynamic   = code == Bytecodes::_invokedynamic;
  const bool is_invokehandle    = code == Bytecodes::_invokehandle;
  const bool is_invokevirtual   = code == Bytecodes::_invokevirtual;
  const bool is_invokespecial   = code == Bytecodes::_invokespecial;
  const bool load_receiver      = (recv != noreg);
  assert(load_receiver == (code != Bytecodes::_invokestatic && code != Bytecodes::_invokedynamic), "");
  assert(recv  == noreg || recv  == O0, "");
  assert(flags == noreg || flags == O1, "");

  // setup registers & access constant pool cache
  if (recv  == noreg)  recv  = O0;
  if (flags == noreg)  flags = O1;
  const Register temp = O2;
  assert_different_registers(method, ra, index, recv, flags, temp);

  load_invoke_cp_cache_entry(byte_no, method, index, flags, is_invokevirtual, false, is_invokedynamic);

  __ mov(SP, O5_savedSP);  // record SP that we wanted the callee to restore

  // maybe push appendix to arguments
  if (is_invokedynamic || is_invokehandle) {
    Label L_no_push;
    __ set((1 << ConstantPoolCacheEntry::has_appendix_shift), temp);
    __ btst(flags, temp);
    __ br(Assembler::zero, false, Assembler::pt, L_no_push);
    __ delayed()->nop();
    // Push the appendix as a trailing parameter.
    // This must be done before we get the receiver,
    // since the parameter_size includes it.
    assert(ConstantPoolCacheEntry::_indy_resolved_references_appendix_offset == 0, "appendix expected at index+0");
    __ load_resolved_reference_at_index(temp, index);
    __ verify_oop(temp);
    __ push_ptr(temp);  // push appendix (MethodType, CallSite, etc.)
    __ bind(L_no_push);
  }
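  // Editorial note (not in the original): the "appendix" is the extra
  // argument materialized from the resolved-references array -- typically a
  // MethodType for invokehandle or a CallSite for invokedynamic.  The
  // parameter_size field in flags already counts it, which is why it has to
  // be pushed before the receiver is located below.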
  // load receiver if needed (after appendix is pushed so parameter size is correct)
  if (load_receiver) {
    __ and3(flags, ConstantPoolCacheEntry::parameter_size_mask, temp);  // get parameter size
    __ load_receiver(temp, recv);  //  __ argument_address uses Gargs but we need Lesp
    __ verify_oop(recv);
  }

  // compute return type
  __ srl(flags, ConstantPoolCacheEntry::tos_state_shift, ra);
  // Make sure we don't need to mask flags after the above shift
  ConstantPoolCacheEntry::verify_tos_state_shift();
  // load return address
  {
    const address table_addr = (address) Interpreter::invoke_return_entry_table_for(code);
    AddressLiteral table(table_addr);
    __ set(table, temp);
    __ sll(ra, LogBytesPerWord, ra);
    __ ld_ptr(Address(temp, ra), ra);
  }
}


void TemplateTable::generate_vtable_call(Register Rrecv, Register Rindex, Register Rret) {
  Register Rtemp = G4_scratch;
  Register Rcall = Rindex;
  assert_different_registers(Rcall, G5_method, Gargs, Rret);

  // get target Method* & entry point
  __ lookup_virtual_method(Rrecv, Rindex, G5_method);
  __ profile_arguments_type(G5_method, Rcall, Gargs, true);
  __ profile_called_method(G5_method, Rtemp);
  __ call_from_interpreter(Rcall, Gargs, Rret);
}

void TemplateTable::invokevirtual(int byte_no) {
  transition(vtos, vtos);
  assert(byte_no == f2_byte, "use this argument");

  Register Rscratch = G3_scratch;
  Register Rtemp    = G4_scratch;
  Register Rret     = Lscratch;
  Register O0_recv  = O0;
  Label notFinal;

  load_invoke_cp_cache_entry(byte_no, G5_method, noreg, Rret, true, false, false);
  __ mov(SP, O5_savedSP);  // record SP that we wanted the callee to restore

  // Check for vfinal
  __ set((1 << ConstantPoolCacheEntry::is_vfinal_shift), G4_scratch);
  __ btst(Rret, G4_scratch);
  __ br(Assembler::zero, false, Assembler::pt, notFinal);
  __ delayed()->and3(Rret, 0xFF, G4_scratch);  // gets number of parameters

  if (RewriteBytecodes && !UseSharedSpaces) {
    patch_bytecode(Bytecodes::_fast_invokevfinal, Rscratch, Rtemp);
  }

  invokevfinal_helper(Rscratch, Rret);

  __ bind(notFinal);

  __ mov(G5_method, Rscratch);   // better scratch register
  __ load_receiver(G4_scratch, O0_recv);  // gets receiverOop
  // receiver is in O0_recv
  __ verify_oop(O0_recv);

  // get return address
  AddressLiteral table(Interpreter::invoke_return_entry_table());
  __ set(table, Rtemp);
  __ srl(Rret, ConstantPoolCacheEntry::tos_state_shift, Rret);  // get return type
  // Make sure we don't need to mask Rret after the above shift
  ConstantPoolCacheEntry::verify_tos_state_shift();
  __ sll(Rret, LogBytesPerWord, Rret);
  __ ld_ptr(Rtemp, Rret, Rret);  // get return address

  // get receiver klass
  __ null_check(O0_recv, oopDesc::klass_offset_in_bytes());
  __ load_klass(O0_recv, O0_recv);
  __ verify_klass_ptr(O0_recv);

  __ profile_virtual_call(O0_recv, O4);

  generate_vtable_call(O0_recv, Rscratch, Rret);
}
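// Editorial pseudocode (assumed, standard HotSpot vtable dispatch, not in the
// original) for the non-final path above:
//
//   klass  = receiver->klass();                  // load_klass
//   method = klass->vtable()[f2_index];          // lookup_virtual_method
//   goto method->interpreter entry;              // call_from_interpreter
//
// The vfinal fast path skips the table walk entirely because f2 already
// holds the Method*.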
void TemplateTable::fast_invokevfinal(int byte_no) {
  transition(vtos, vtos);
  assert(byte_no == f2_byte, "use this argument");

  load_invoke_cp_cache_entry(byte_no, G5_method, noreg, Lscratch, true,
                             /*is_invokevfinal*/true, false);
  __ mov(SP, O5_savedSP);  // record SP that we wanted the callee to restore
  invokevfinal_helper(G3_scratch, Lscratch);
}

void TemplateTable::invokevfinal_helper(Register Rscratch, Register Rret) {
  Register Rtemp = G4_scratch;

  // Load receiver from stack slot
  __ ld_ptr(G5_method, in_bytes(Method::const_offset()), G4_scratch);
  __ lduh(G4_scratch, in_bytes(ConstMethod::size_of_parameters_offset()), G4_scratch);
  __ load_receiver(G4_scratch, O0);

  // receiver NULL check
  __ null_check(O0);

  __ profile_final_call(O4);
  __ profile_arguments_type(G5_method, Rscratch, Gargs, true);

  // get return address
  AddressLiteral table(Interpreter::invoke_return_entry_table());
  __ set(table, Rtemp);
  __ srl(Rret, ConstantPoolCacheEntry::tos_state_shift, Rret);  // get return type
  // Make sure we don't need to mask Rret after the above shift
  ConstantPoolCacheEntry::verify_tos_state_shift();
  __ sll(Rret, LogBytesPerWord, Rret);
  __ ld_ptr(Rtemp, Rret, Rret);  // get return address


  // do the call
  __ call_from_interpreter(Rscratch, Gargs, Rret);
}


void TemplateTable::invokespecial(int byte_no) {
  transition(vtos, vtos);
  assert(byte_no == f1_byte, "use this argument");

  const Register Rret     = Lscratch;
  const Register O0_recv  = O0;
  const Register Rscratch = G3_scratch;

  prepare_invoke(byte_no, G5_method, Rret, noreg, O0_recv);  // get receiver also for null check
  __ null_check(O0_recv);

  // do the call
  __ profile_call(O4);
  __ profile_arguments_type(G5_method, Rscratch, Gargs, false);
  __ call_from_interpreter(Rscratch, Gargs, Rret);
}


void TemplateTable::invokestatic(int byte_no) {
  transition(vtos, vtos);
  assert(byte_no == f1_byte, "use this argument");

  const Register Rret     = Lscratch;
  const Register Rscratch = G3_scratch;

  prepare_invoke(byte_no, G5_method, Rret);  // get f1 Method*

  // do the call
  __ profile_call(O4);
  __ profile_arguments_type(G5_method, Rscratch, Gargs, false);
  __ call_from_interpreter(Rscratch, Gargs, Rret);
}

void TemplateTable::invokeinterface_object_method(Register RKlass,
                                                  Register Rcall,
                                                  Register Rret,
                                                  Register Rflags) {
  Register Rscratch = G4_scratch;
  Register Rindex   = Lscratch;

  assert_different_registers(Rscratch, Rindex, Rret);

  Label notFinal;

  // Check for vfinal
  __ set((1 << ConstantPoolCacheEntry::is_vfinal_shift), Rscratch);
  __ btst(Rflags, Rscratch);
  __ br(Assembler::zero, false, Assembler::pt, notFinal);
  __ delayed()->nop();

  __ profile_final_call(O4);

  // do the call - the index (f2) contains the Method*
  assert_different_registers(G5_method, Gargs, Rcall);
  __ mov(Rindex, G5_method);
  __ profile_arguments_type(G5_method, Rcall, Gargs, true);
  __ call_from_interpreter(Rcall, Gargs, Rret);
  __ bind(notFinal);

  __ profile_virtual_call(RKlass, O4);
  generate_vtable_call(RKlass, Rindex, Rret);
}
void TemplateTable::invokeinterface(int byte_no) {
  transition(vtos, vtos);
  assert(byte_no == f1_byte, "use this argument");

  const Register Rinterface = G1_scratch;
  const Register Rret       = G3_scratch;
  const Register Rindex     = Lscratch;
  const Register O0_recv    = O0;
  const Register O1_flags   = O1;
  const Register O2_Klass   = O2;
  const Register Rscratch   = G4_scratch;
  assert_different_registers(Rscratch, G5_method);

  prepare_invoke(byte_no, Rinterface, Rret, Rindex, O0_recv, O1_flags);

  // get receiver klass
  __ null_check(O0_recv, oopDesc::klass_offset_in_bytes());
  __ load_klass(O0_recv, O2_Klass);

  // Special case of invokeinterface called for virtual method of
  // java.lang.Object.  See cpCacheOop.cpp for details.
  // This code isn't produced by javac, but could be produced by
  // another compliant Java compiler.
  Label notMethod;
  __ set((1 << ConstantPoolCacheEntry::is_forced_virtual_shift), Rscratch);
  __ btst(O1_flags, Rscratch);
  __ br(Assembler::zero, false, Assembler::pt, notMethod);
  __ delayed()->nop();

  invokeinterface_object_method(O2_Klass, Rinterface, Rret, O1_flags);

  __ bind(notMethod);

  __ profile_virtual_call(O2_Klass, O4);

  //
  // find entry point to call
  //

  // compute start of first itableOffsetEntry (which is at end of vtable)
  const int base = in_bytes(Klass::vtable_start_offset());
  Label search;
  Register Rtemp = O1_flags;

  __ ld(O2_Klass, in_bytes(Klass::vtable_length_offset()), Rtemp);
  __ sll(Rtemp, LogBytesPerWord, Rtemp);  // Rtemp *= wordSize
  if (Assembler::is_simm13(base)) {
    __ add(Rtemp, base, Rtemp);
  } else {
    __ set(base, Rscratch);
    __ add(Rscratch, Rtemp, Rtemp);
  }
  __ add(O2_Klass, Rtemp, Rscratch);

  __ bind(search);

  __ ld_ptr(Rscratch, itableOffsetEntry::interface_offset_in_bytes(), Rtemp);
  {
    Label ok;

    // Check that entry is non-null.  Null entries are probably a bytecode
    // problem.  If the interface isn't implemented by the receiver class,
    // the VM should throw IncompatibleClassChangeError.  linkResolver checks
    // this too but that's only if the entry isn't already resolved, so we
    // need to check again.
    __ br_notnull_short( Rtemp, Assembler::pt, ok);
    call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_IncompatibleClassChangeError));
    __ should_not_reach_here();
    __ bind(ok);
  }

  __ cmp(Rinterface, Rtemp);
  __ brx(Assembler::notEqual, true, Assembler::pn, search);
  __ delayed()->add(Rscratch, itableOffsetEntry::size() * wordSize, Rscratch);

  // entry found and Rscratch points to it
  __ ld(Rscratch, itableOffsetEntry::offset_offset_in_bytes(), Rscratch);

  assert(itableMethodEntry::method_offset_in_bytes() == 0, "adjust instruction below");
  __ sll(Rindex, exact_log2(itableMethodEntry::size() * wordSize), Rindex);  // Rindex *= 8
  __ add(Rscratch, Rindex, Rscratch);
  __ ld_ptr(O2_Klass, Rscratch, G5_method);
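  // Editorial sketch (assumed, standard HotSpot klass layout, not in the
  // original) of what the loop above walks:
  //
  //   Klass
  //     ... embedded vtable (vtable_length entries) ...
  //     itableOffsetEntry { Klass* interface; int offset; }  // repeated,
  //     ...                                                  // null-terminated
  //     itableMethodEntry { Method* method; }                // arrays located
  //     ...                                                  // via 'offset'
  //
  // Rscratch is left pointing at the matching itableOffsetEntry; its offset
  // locates that interface's method array, and Rindex (scaled to words)
  // selects the Method* within it.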
  // Check for abstract method error.
  {
    Label ok;
    __ br_notnull_short(G5_method, Assembler::pt, ok);
    call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_AbstractMethodError));
    __ should_not_reach_here();
    __ bind(ok);
  }

  Register Rcall = Rinterface;
  assert_different_registers(Rcall, G5_method, Gargs, Rret);

  __ profile_arguments_type(G5_method, Rcall, Gargs, true);
  __ profile_called_method(G5_method, Rscratch);
  __ call_from_interpreter(Rcall, Gargs, Rret);
}

void TemplateTable::invokehandle(int byte_no) {
  transition(vtos, vtos);
  assert(byte_no == f1_byte, "use this argument");

  const Register Rret     = Lscratch;
  const Register G4_mtype = G4_scratch;
  const Register O0_recv  = O0;
  const Register Rscratch = G3_scratch;

  prepare_invoke(byte_no, G5_method, Rret, G4_mtype, O0_recv);
  __ null_check(O0_recv);

  // G4: MethodType object (from cpool->resolved_references[f1], if necessary)
  // G5: MH.invokeExact_MT method (from f2)

  // Note:  G4_mtype is already pushed (if necessary) by prepare_invoke

  // do the call
  __ verify_oop(G4_mtype);
  __ profile_final_call(O4);  // FIXME: profile the LambdaForm also
  __ profile_arguments_type(G5_method, Rscratch, Gargs, true);
  __ call_from_interpreter(Rscratch, Gargs, Rret);
}


void TemplateTable::invokedynamic(int byte_no) {
  transition(vtos, vtos);
  assert(byte_no == f1_byte, "use this argument");

  const Register Rret        = Lscratch;
  const Register G4_callsite = G4_scratch;
  const Register Rscratch    = G3_scratch;

  prepare_invoke(byte_no, G5_method, Rret, G4_callsite);

  // G4: CallSite object (from cpool->resolved_references[f1])
  // G5: MH.linkToCallSite method (from f2)

  // Note:  G4_callsite is already pushed by prepare_invoke

  // %%% should make a type profile for any invokedynamic that takes a ref argument
  // profile this call
  __ profile_call(O4);

  // do the call
  __ verify_oop(G4_callsite);
  __ profile_arguments_type(G5_method, Rscratch, Gargs, false);
  __ call_from_interpreter(Rscratch, Gargs, Rret);
}


//----------------------------------------------------------------------------------------------------
// Allocation

void TemplateTable::_new() {
  transition(vtos, atos);

  Label slow_case;
  Label done;
  Label initialize_header;
  Label initialize_object;  // including clearing the fields

  Register RallocatedObject = Otos_i;
  Register RinstanceKlass = O1;
  Register Roffset = O3;
  Register Rscratch = O4;

  __ get_2_byte_integer_at_bcp(1, Rscratch, Roffset, InterpreterMacroAssembler::Unsigned);
  __ get_cpool_and_tags(Rscratch, G3_scratch);
  // make sure the class we're about to instantiate has been resolved
  // This is done before loading the InstanceKlass to be consistent with the
  // order in which the constant pool is updated (see ConstantPool::klass_at_put)
  __ add(G3_scratch, Array<u1>::base_offset_in_bytes(), G3_scratch);
  __ ldub(G3_scratch, Roffset, G3_scratch);
  __ cmp(G3_scratch, JVM_CONSTANT_Class);
  __ br(Assembler::notEqual, false, Assembler::pn, slow_case);
  __ delayed()->sll(Roffset, LogBytesPerWord, Roffset);
  // get InstanceKlass
  //__ sll(Roffset, LogBytesPerWord, Roffset);        // executed in delay slot
  __ add(Roffset, sizeof(ConstantPool), Roffset);
  __ ld_ptr(Rscratch, Roffset, RinstanceKlass);
  // make sure klass is fully initialized:
  __ ldub(RinstanceKlass, in_bytes(InstanceKlass::init_state_offset()), G3_scratch);
  __ cmp(G3_scratch, InstanceKlass::fully_initialized);
  __ br(Assembler::notEqual, false, Assembler::pn, slow_case);
  __ delayed()->ld(RinstanceKlass, in_bytes(Klass::layout_helper_offset()), Roffset);

  // get instance_size in InstanceKlass (already aligned)
  //__ ld(RinstanceKlass, in_bytes(Klass::layout_helper_offset()), Roffset);

  // make sure klass does not have a finalizer, and is not abstract,
  // an interface, or java/lang/Class
  __ btst(Klass::_lh_instance_slow_path_bit, Roffset);
  __ br(Assembler::notZero, false, Assembler::pn, slow_case);
  __ delayed()->nop();

  // allocate the instance
  // 1) Try to allocate in the TLAB
  // 2) if that fails, and the TLAB is not full enough to discard, allocate in the shared Eden
  // 3) if the above fails (or is not applicable), go to a slow case
  //    (creates a new TLAB, etc.)

  const bool allow_shared_alloc =
    Universe::heap()->supports_inline_contig_alloc();

  if (UseTLAB) {
    Register RoldTopValue = RallocatedObject;
    Register RtlabWasteLimitValue = G3_scratch;
    Register RnewTopValue = G1_scratch;
    Register RendValue = Rscratch;
    Register RfreeValue = RnewTopValue;

    // check if we can allocate in the TLAB
    __ ld_ptr(G2_thread, in_bytes(JavaThread::tlab_top_offset()), RoldTopValue);  // sets up RallocatedObject
    __ ld_ptr(G2_thread, in_bytes(JavaThread::tlab_end_offset()), RendValue);
    __ add(RoldTopValue, Roffset, RnewTopValue);

    // if there is enough space, we do not CAS and do not clear
    __ cmp(RnewTopValue, RendValue);
    if (ZeroTLAB) {
      // the fields have already been cleared
      __ brx(Assembler::lessEqualUnsigned, true, Assembler::pt, initialize_header);
    } else {
      // initialize both the header and fields
      __ brx(Assembler::lessEqualUnsigned, true, Assembler::pt, initialize_object);
    }
    __ delayed()->st_ptr(RnewTopValue, G2_thread, in_bytes(JavaThread::tlab_top_offset()));

    if (allow_shared_alloc) {
      // Check if tlab should be discarded (refill_waste_limit >= free)
      __ ld_ptr(G2_thread, in_bytes(JavaThread::tlab_refill_waste_limit_offset()), RtlabWasteLimitValue);
      __ sub(RendValue, RoldTopValue, RfreeValue);
      __ srlx(RfreeValue, LogHeapWordSize, RfreeValue);
      __ cmp_and_brx_short(RtlabWasteLimitValue, RfreeValue, Assembler::greaterEqualUnsigned, Assembler::pt, slow_case);  // tlab waste is small

      // increment waste limit to prevent getting stuck on this slow path
      if (Assembler::is_simm13(ThreadLocalAllocBuffer::refill_waste_limit_increment())) {
        __ add(RtlabWasteLimitValue, ThreadLocalAllocBuffer::refill_waste_limit_increment(), RtlabWasteLimitValue);
      } else {
        // set64 does not use the temp register if the given constant fits in
        // 32 bits.  So we can just use any register; using G0 would discard
        // the upper 32 bits of the value.
        __ set64(ThreadLocalAllocBuffer::refill_waste_limit_increment(), G4_scratch, G0);
        __ add(RtlabWasteLimitValue, G4_scratch, RtlabWasteLimitValue);
      }
      __ st_ptr(RtlabWasteLimitValue, G2_thread, in_bytes(JavaThread::tlab_refill_waste_limit_offset()));
    } else {
      // No allocation in the shared eden.
      __ ba_short(slow_case);
    }
  }
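  // Editorial pseudocode (not in the original) for the TLAB fast path above:
  //
  //   obj     = thread.tlab_top;
  //   new_top = obj + instance_size;
  //   if (new_top <= thread.tlab_end) {
  //     thread.tlab_top = new_top;            // store done in the delay slot
  //     goto initialize;                      // header-only if ZeroTLAB
  //   }
  //   if (tlab_free <= refill_waste_limit) {
  //     goto slow_case;                       // waste is small: refill TLAB
  //   }
  //   refill_waste_limit += increment;        // avoid getting stuck here
  //   // fall through to the shared-Eden CAS loop below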
  // Allocation in the shared Eden
  if (allow_shared_alloc) {
    Register RoldTopValue = G1_scratch;
    Register RtopAddr = G3_scratch;
    Register RnewTopValue = RallocatedObject;
    Register RendValue = Rscratch;

    __ set((intptr_t)Universe::heap()->top_addr(), RtopAddr);

    Label retry;
    __ bind(retry);
    __ set((intptr_t)Universe::heap()->end_addr(), RendValue);
    __ ld_ptr(RendValue, 0, RendValue);
    __ ld_ptr(RtopAddr, 0, RoldTopValue);
    __ add(RoldTopValue, Roffset, RnewTopValue);

    // RnewTopValue contains the top address after the new object
    // has been allocated.
    __ cmp_and_brx_short(RnewTopValue, RendValue, Assembler::greaterUnsigned, Assembler::pn, slow_case);

    __ cas_ptr(RtopAddr, RoldTopValue, RnewTopValue);

    // if someone beat us on the allocation, try again, otherwise continue
    __ cmp_and_brx_short(RoldTopValue, RnewTopValue, Assembler::notEqual, Assembler::pn, retry);

    // bump total bytes allocated by this thread
    // RoldTopValue and RtopAddr are dead, so can use G1 and G3
    __ incr_allocated_bytes(Roffset, G1_scratch, G3_scratch);
  }

  if (UseTLAB || Universe::heap()->supports_inline_contig_alloc()) {
    // clear object fields
    __ bind(initialize_object);
    __ deccc(Roffset, sizeof(oopDesc));
    __ br(Assembler::zero, false, Assembler::pt, initialize_header);
    __ delayed()->add(RallocatedObject, sizeof(oopDesc), G3_scratch);

    // initialize remaining object fields
    if (UseBlockZeroing) {
      // Use BIS for zeroing
      __ bis_zeroing(G3_scratch, Roffset, G1_scratch, initialize_header);
    } else {
      Label loop;
      __ subcc(Roffset, wordSize, Roffset);
      __ bind(loop);
      //__ subcc(Roffset, wordSize, Roffset);      // executed above loop or in delay slot
      __ st_ptr(G0, G3_scratch, Roffset);
      __ br(Assembler::notEqual, false, Assembler::pt, loop);
      __ delayed()->subcc(Roffset, wordSize, Roffset);
    }
    __ ba_short(initialize_header);
  }

  // slow case
  __ bind(slow_case);
  __ get_2_byte_integer_at_bcp(1, G3_scratch, O2, InterpreterMacroAssembler::Unsigned);
  __ get_constant_pool(O1);

  call_VM(Otos_i, CAST_FROM_FN_PTR(address, InterpreterRuntime::_new), O1, O2);

  __ ba_short(done);

  // Initialize the header: mark, klass
  __ bind(initialize_header);

  if (UseBiasedLocking) {
    __ ld_ptr(RinstanceKlass, in_bytes(Klass::prototype_header_offset()), G4_scratch);
  } else {
    __ set((intptr_t)markOopDesc::prototype(), G4_scratch);
  }
  __ st_ptr(G4_scratch, RallocatedObject, oopDesc::mark_offset_in_bytes());       // mark
  __ store_klass_gap(G0, RallocatedObject);         // klass gap if compressed
  __ store_klass(RinstanceKlass, RallocatedObject); // klass (last for cms)

  {
    SkipIfEqual skip_if(
      _masm, G4_scratch, &DTraceAllocProbes, Assembler::zero);
    // Trigger dtrace event
    __ push(atos);
    __ call_VM_leaf(noreg,
                    CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_object_alloc), O0);
    __ pop(atos);
  }

  // continue
  __ bind(done);
}



void TemplateTable::newarray() {
  transition(itos, atos);
  __ ldub(Lbcp, 1, O1);
  call_VM(Otos_i, CAST_FROM_FN_PTR(address, InterpreterRuntime::newarray), O1, Otos_i);
}


void TemplateTable::anewarray() {
  transition(itos, atos);
void TemplateTable::anewarray() {
  transition(itos, atos);
  __ get_constant_pool(O1);
  __ get_2_byte_integer_at_bcp(1, G4_scratch, O2, InterpreterMacroAssembler::Unsigned);
  call_VM(Otos_i, CAST_FROM_FN_PTR(address, InterpreterRuntime::anewarray), O1, O2, Otos_i);
}


void TemplateTable::arraylength() {
  transition(atos, itos);
  Label ok;
  __ verify_oop(Otos_i);
  __ tst(Otos_i);
  __ throw_if_not_1_x( Assembler::notZero, ok );
  __ delayed()->ld(Otos_i, arrayOopDesc::length_offset_in_bytes(), Otos_i);
  __ throw_if_not_2( Interpreter::_throw_NullPointerException_entry, G3_scratch, ok);
}
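
// checkcast and instanceof below share the same resolution scheme. A rough
// sketch of the generated logic (illustrative pseudocode; 'tags', 'cpool'
// and 'u2_at' are placeholder names, not HotSpot identifiers):
//
//   if (obj == NULL) goto is_null;                 // null needs no check
//   u2 cp_index = u2_at(bcp + 1);
//   if (tags[cp_index] == JVM_CONSTANT_Class) {    // already "quickened"
//     klass = cpool->slot_at(cp_index);            // resolved Klass* in pool
//   } else {
//     klass = quicken_io_cc();                     // resolve via the runtime
//   }
//   // then run a fast subtype check of the object's klass against 'klass'
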
void TemplateTable::checkcast() {
  transition(atos, atos);
  Label done, is_null, quicked, cast_ok, resolved;
  Register Roffset = G1_scratch;
  Register RobjKlass = O5;
  Register RspecifiedKlass = O4;

  // Check for casting a NULL
  __ br_null(Otos_i, false, Assembler::pn, is_null);
  __ delayed()->nop();

  // Get value klass in RobjKlass
  __ load_klass(Otos_i, RobjKlass); // get value klass

  // Get constant pool tag
  __ get_2_byte_integer_at_bcp(1, Lscratch, Roffset, InterpreterMacroAssembler::Unsigned);

  // See if the checkcast has been quickened
  __ get_cpool_and_tags(Lscratch, G3_scratch);
  __ add(G3_scratch, Array<u1>::base_offset_in_bytes(), G3_scratch);
  __ ldub(G3_scratch, Roffset, G3_scratch);
  __ cmp(G3_scratch, JVM_CONSTANT_Class);
  __ br(Assembler::equal, true, Assembler::pt, quicked);
  __ delayed()->sll(Roffset, LogBytesPerWord, Roffset);

  __ push_ptr(); // save receiver for result, and for GC
  call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::quicken_io_cc) );
  __ get_vm_result_2(RspecifiedKlass);
  __ pop_ptr(Otos_i, G3_scratch); // restore receiver

  __ ba_short(resolved);

  // Extract target class from constant pool
  __ bind(quicked);
  __ add(Roffset, sizeof(ConstantPool), Roffset);
  __ ld_ptr(Lscratch, Roffset, RspecifiedKlass);
  __ bind(resolved);
  __ load_klass(Otos_i, RobjKlass); // get value klass

  // Generate a fast subtype check. Branch to cast_ok if no
  // failure. Throw an exception on failure.
  __ gen_subtype_check( RobjKlass, RspecifiedKlass, G3_scratch, G4_scratch, G1_scratch, cast_ok );

  // Not a subtype; so must throw exception
  __ throw_if_not_x( Assembler::never, Interpreter::_throw_ClassCastException_entry, G3_scratch );

  __ bind(cast_ok);

  if (ProfileInterpreter) {
    __ ba_short(done);
  }
  __ bind(is_null);
  __ profile_null_seen(G3_scratch);
  __ bind(done);
}


void TemplateTable::instanceof() {
  Label done, is_null, quicked, resolved;
  transition(atos, itos);
  Register Roffset = G1_scratch;
  Register RobjKlass = O5;
  Register RspecifiedKlass = O4;

  // Check for a NULL object
  __ br_null(Otos_i, false, Assembler::pt, is_null);
  __ delayed()->nop();

  // Get value klass in RobjKlass
  __ load_klass(Otos_i, RobjKlass); // get value klass

  // Get constant pool tag
  __ get_2_byte_integer_at_bcp(1, Lscratch, Roffset, InterpreterMacroAssembler::Unsigned);

  // See if the instanceof has been quickened
  __ get_cpool_and_tags(Lscratch, G3_scratch);
  __ add(G3_scratch, Array<u1>::base_offset_in_bytes(), G3_scratch);
  __ ldub(G3_scratch, Roffset, G3_scratch);
  __ cmp(G3_scratch, JVM_CONSTANT_Class);
  __ br(Assembler::equal, true, Assembler::pt, quicked);
  __ delayed()->sll(Roffset, LogBytesPerWord, Roffset);

  __ push_ptr(); // save receiver for result, and for GC
  call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::quicken_io_cc) );
  __ get_vm_result_2(RspecifiedKlass);
  __ pop_ptr(Otos_i, G3_scratch); // restore receiver

  __ ba_short(resolved);

  // Extract target class from constant pool
  __ bind(quicked);
  __ add(Roffset, sizeof(ConstantPool), Roffset);
  __ get_constant_pool(Lscratch);
  __ ld_ptr(Lscratch, Roffset, RspecifiedKlass);
  __ bind(resolved);
  __ load_klass(Otos_i, RobjKlass); // get value klass

  // Generate a fast subtype check. Branch to done if no
  // failure. Return 0 on failure.
  __ or3(G0, 1, Otos_i);      // set result assuming quick tests succeed
  __ gen_subtype_check( RobjKlass, RspecifiedKlass, G3_scratch, G4_scratch, G1_scratch, done );
  // Not a subtype; return 0
  __ clr( Otos_i );

  if (ProfileInterpreter) {
    __ ba_short(done);
  }
  __ bind(is_null);
  __ profile_null_seen(G3_scratch);
  __ bind(done);
}
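
// Note the different result protocols of the two type checks above:
// checkcast leaves the object in Otos_i on success (atos -> atos) and never
// returns on failure -- control is dispatched to the ClassCastException
// throw entry; instanceof pre-sets Otos_i to 1, branches to 'done' when the
// subtype check succeeds, and falls through to clear Otos_i to 0 otherwise
// (atos -> itos).
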
void TemplateTable::_breakpoint() {

  // Note: We get here even if we are single stepping.
  // jbug insists on setting breakpoints at every bytecode
  // even if we are in single step mode.

  transition(vtos, vtos);
  // get the unpatched byte code
  __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::get_original_bytecode_at), Lmethod, Lbcp);
  __ mov(O0, Lbyte_code);

  // post the breakpoint event
  __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::_breakpoint), Lmethod, Lbcp);

  // complete the execution of original bytecode
  __ dispatch_normal(vtos);
}


//----------------------------------------------------------------------------------------------------
// Exceptions

void TemplateTable::athrow() {
  transition(atos, vtos);

  // This works because the exception is cached in Otos_i, which is the same
  // register as O0, which is what the throw exception entry expects.
  assert(Otos_i == Oexception, "see explanation above");

  __ verify_oop(Otos_i);
  __ null_check(Otos_i);
  __ throw_if_not_x(Assembler::never, Interpreter::throw_exception_entry(), G3_scratch);
}


//----------------------------------------------------------------------------------------------------
// Synchronization


// See frame_sparc.hpp for monitor block layout.
// Monitor elements are dynamically allocated by growing the stack as needed.

void TemplateTable::monitorenter() {
  transition(atos, vtos);
  __ verify_oop(Otos_i);
  // Try to acquire a lock on the object.
  // Repeat until succeeded (i.e., until monitorenter returns true).

  { Label ok;
    __ tst(Otos_i);
    __ throw_if_not_1_x( Assembler::notZero, ok);
    __ delayed()->mov(Otos_i, Lscratch); // save obj
    __ throw_if_not_2( Interpreter::_throw_NullPointerException_entry, G3_scratch, ok);
  }

  assert(O0 == Otos_i, "Be sure where the object to lock is");

  // find a free slot in the monitor block

  // initialize entry pointer
  __ clr(O1); // points to free slot or NULL

  {
    Label entry, loop, exit;
    __ add( __ top_most_monitor(), O2 ); // last one to check
    __ ba( entry );
    __ delayed()->mov( Lmonitors, O3 ); // first one to check

    __ bind( loop );

    __ verify_oop(O4); // verify each monitor's oop
    __ tst(O4); // is this entry unused?
    __ movcc( Assembler::zero, false, Assembler::ptr_cc, O3, O1); // if so, remember it in O1

    __ cmp(O4, O0); // check if current entry is for same object
    __ brx( Assembler::equal, false, Assembler::pn, exit );
    __ delayed()->inc( O3, frame::interpreter_frame_monitor_size() * wordSize ); // check next one

    __ bind( entry );

    __ cmp( O3, O2 );
    __ brx( Assembler::lessEqualUnsigned, true, Assembler::pt, loop );
    __ delayed()->ld_ptr(O3, BasicObjectLock::obj_offset_in_bytes(), O4);

    __ bind( exit );
  }

  { Label allocated;

    // found free slot?
    __ br_notnull_short(O1, Assembler::pn, allocated);

    __ add_monitor_to_stack( false, O2, O3 );
    __ mov(Lmonitors, O1);

    __ bind(allocated);
  }
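
  // The scan above is roughly equivalent to the following sketch
  // (illustrative only; 'monitor_size' stands for
  // frame::interpreter_frame_monitor_size() * wordSize):
  //
  //   BasicObjectLock* free = NULL;                      // O1
  //   for (BasicObjectLock* cur = Lmonitors;             // O3
  //        cur <= top_most_monitor();
  //        cur = (BasicObjectLock*)((char*)cur + monitor_size)) {
  //     if (cur->obj() == NULL)     free = cur;          // remember free slot
  //     if (cur->obj() == lock_obj) break;               // same object: stop
  //   }
  //   if (free == NULL) free = add_monitor_to_stack(...);
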
  // Increment bcp to point to the next bytecode, so exception handling for async. exceptions works correctly.
  // The object has already been popped from the stack, so the expression stack looks correct.
  __ inc(Lbcp);

  __ st_ptr(O0, O1, BasicObjectLock::obj_offset_in_bytes()); // store object
  __ lock_object(O1, O0);

  // check if there's enough space on the stack for the monitors after locking
  __ generate_stack_overflow_check(0);

  // The bcp has already been incremented. Just need to dispatch to next instruction.
  __ dispatch_next(vtos);
}


void TemplateTable::monitorexit() {
  transition(atos, vtos);
  __ verify_oop(Otos_i);
  __ tst(Otos_i);
  __ throw_if_not_x( Assembler::notZero, Interpreter::_throw_NullPointerException_entry, G3_scratch );

  assert(O0 == Otos_i, "just checking");

  { Label entry, loop, found;
    __ add( __ top_most_monitor(), O2 ); // last one to check
    __ ba(entry);
    // Use Lscratch to hold the monitor element to check; start with the most
    // recent monitor. By using a local it survives the call to the C routine.
    __ delayed()->mov( Lmonitors, Lscratch );

    __ bind( loop );

    __ verify_oop(O4); // verify each monitor's oop
    __ cmp(O4, O0); // check if current entry is for desired object
    __ brx( Assembler::equal, true, Assembler::pt, found );
    __ delayed()->mov(Lscratch, O1); // pass found entry as argument to monitorexit

    __ inc( Lscratch, frame::interpreter_frame_monitor_size() * wordSize ); // advance to next

    __ bind( entry );

    __ cmp( Lscratch, O2 );
    __ brx( Assembler::lessEqualUnsigned, true, Assembler::pt, loop );
    __ delayed()->ld_ptr(Lscratch, BasicObjectLock::obj_offset_in_bytes(), O4);

    // fell through the loop without finding a matching monitor
    call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_illegal_monitor_state_exception));
    __ should_not_reach_here();

    __ bind(found);
  }
  __ unlock_object(O1);
}


//----------------------------------------------------------------------------------------------------
// Wide instructions

void TemplateTable::wide() {
  transition(vtos, vtos);
  __ ldub(Lbcp, 1, G3_scratch); // get next bc
  __ sll(G3_scratch, LogBytesPerWord, G3_scratch);
  AddressLiteral ep(Interpreter::_wentry_point);
  __ set(ep, G4_scratch);
  __ ld_ptr(G4_scratch, G3_scratch, G3_scratch);
  __ jmp(G3_scratch, G0);
  __ delayed()->nop();
  // Note: the Lbcp increment step is part of the individual wide bytecode implementations
}


//----------------------------------------------------------------------------------------------------
// Multi arrays

void TemplateTable::multianewarray() {
  transition(vtos, atos);
  // put ndims * wordSize into Lscratch
  __ ldub( Lbcp, 3, Lscratch);
  __ sll( Lscratch, Interpreter::logStackElementSize, Lscratch);
  // Lesp points past the last dimension, so set O1 to the address of the first dimension
  __ add( Lesp, Lscratch, O1);
  call_VM(Otos_i, CAST_FROM_FN_PTR(address, InterpreterRuntime::multianewarray), O1);
  __ add( Lesp, Lscratch, Lesp); // pop all dimensions off the stack
}
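
// multianewarray operand and stack layout, for reference (a sketch of the
// JVM spec encoding, not generated code):
//
//   [ 0xc5 ][ index:u2 ][ ndims:u1 ]   -- the dimension count is at bcp + 3
//
// The ndims dimension words sit on the expression stack with the last
// dimension on top; since Lesp points past the topmost element, adding
// ndims * wordSize yields the address of the first dimension, which is what
// InterpreterRuntime::multianewarray receives. The same amount is then added
// to Lesp to pop all dimensions at once.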